Example #1
class ENC_turbofy_rate2(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(ENC_turbofy_rate2, self).__init__(args)
        self.args = args

        # Encoder
        self.enc_rnn_1 = torch.nn.GRU(1,
                                      args.enc_num_unit,
                                      num_layers=args.enc_num_layer,
                                      bias=True,
                                      batch_first=True,
                                      dropout=0,
                                      bidirectional=True)

        self.enc_linear_1 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.enc_rnn_2 = torch.nn.GRU(1,
                                      args.enc_num_unit,
                                      num_layers=args.enc_num_layer,
                                      bias=True,
                                      batch_first=True,
                                      dropout=0,
                                      bidirectional=True)

        self.enc_linear_2 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)
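All of the examples on this page assume an argparse-style `args` object and a permutation `p_array` for the interleaver. A minimal sketch of how the two might be constructed for a quick experiment; every hyperparameter value below is an illustrative assumption, not a default from the repository:

from types import SimpleNamespace

import numpy as np
import torch

# Hypothetical hyperparameters for the snippets on this page.
args = SimpleNamespace(
    block_len=100, batch_size=32,          # bits per block, blocks per batch
    code_rate_k=1, code_rate_n=3,          # rate k/n = 1/3
    enc_num_layer=2, enc_num_unit=100, enc_kernel_size=5,
    dec_num_layer=5, dec_num_unit=100, dec_kernel_size=5,
    num_iteration=6, num_iter_ft=5,        # decoding rounds, soft-information width
    dropout=0.0, extrinsic=1, no_cuda=True,
)

# Fixed-seed interleaver permutation of the block positions.
p_array = np.random.RandomState(0).permutation(np.arange(args.block_len))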
Example #2
class ENC_interRNN_sys(ENCBase):
    def __init__(self, args, p_array):
        super(ENC_interRNN_sys, self).__init__(args)
        self.args = args

        # Encoder
        self.enc_rnn = torch.nn.GRU(1,
                                    args.enc_num_unit,
                                    num_layers=args.enc_num_layer,
                                    bias=True,
                                    batch_first=True,
                                    dropout=0,
                                    bidirectional=True)

        self.enc_linear = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.enc_rnn_int = torch.nn.GRU(1,
                                        args.enc_num_unit,
                                        num_layers=args.enc_num_layer,
                                        bias=True,
                                        batch_first=True,
                                        dropout=0,
                                        bidirectional=True)

        self.enc_linear_int = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)
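`Interleaver` and `DeInterleaver` are defined elsewhere in the repository. A minimal sketch consistent with how they are called here: both permute the block (time) dimension of a `(batch, block_len, features)` tensor, the deinterleaver with the inverse permutation. `set_parray` mirrors the calls in these snippets; everything else is an assumption:

import numpy as np
import torch

class Interleaver(torch.nn.Module):
    def __init__(self, args, p_array):
        super().__init__()
        self.set_parray(p_array)

    def set_parray(self, p_array):
        self.p_array = torch.as_tensor(np.asarray(p_array), dtype=torch.long)

    def forward(self, inputs):
        # inputs: (batch, block_len, features) -> permute along block_len
        return inputs[:, self.p_array, :]

class DeInterleaver(Interleaver):
    def set_parray(self, p_array):
        # argsort gives the inverse permutation, undoing the interleaver
        self.p_array = torch.as_tensor(np.argsort(np.asarray(p_array)), dtype=torch.long)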
Example #3
class DEC_LargeCNN_rate2(torch.nn.Module):
    def __init__(self, args, p_array):
        super(DEC_LargeCNN_rate2, self).__init__()
        self.args = args

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.interleaver          = Interleaver(args, p_array)
        self.deinterleaver        = DeInterleaver(args, p_array)

        self.dec1_cnns      = torch.nn.ModuleList()
        self.dec2_cnns      = torch.nn.ModuleList()
        self.dec1_outputs   = torch.nn.ModuleList()
        self.dec2_outputs   = torch.nn.ModuleList()

        for idx in range(args.num_iteration):
            self.dec1_cnns.append(
                SameShapeConv1d(num_layer=args.dec_num_layer,
                                in_channels=2 + args.num_iter_ft,
                                out_channels=args.dec_num_unit,
                                kernel_size=args.dec_kernel_size))

            self.dec2_cnns.append(
                SameShapeConv1d(num_layer=args.dec_num_layer,
                                in_channels=2 + args.num_iter_ft,
                                out_channels=args.dec_num_unit,
                                kernel_size=args.dec_kernel_size))
            self.dec1_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

            if idx == args.num_iteration - 1:
                self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, 1))
            else:
                self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))
Example #4
class CNN_encoder_rate3(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(CNN_encoder_rate3, self).__init__(args)
        self.args = args

        # Encoder

        self.enc_cnn_1 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)

        self.enc_cnn_2 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)

        self.enc_cnn_3 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)
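`SameShapeConv1d` is likewise external. Judging from the shape bookkeeping in these snippets, it is a stack of 1-D convolutions with 'same' padding that maps `(batch, block_len, in_channels)` to `(batch, block_len, out_channels)`; a plausible sketch, with the ELU activation being an assumption:

import torch
import torch.nn.functional as F

class SameShapeConv1d(torch.nn.Module):
    def __init__(self, num_layer, in_channels, out_channels, kernel_size):
        super().__init__()
        layers = []
        for i in range(num_layer):
            layers.append(torch.nn.Conv1d(in_channels if i == 0 else out_channels,
                                          out_channels, kernel_size,
                                          padding=kernel_size // 2))  # 'same' length for odd kernels
        self.convs = torch.nn.ModuleList(layers)

    def forward(self, inputs):
        x = inputs.permute(0, 2, 1)   # (batch, channels, block_len) for Conv1d
        for conv in self.convs:
            x = F.elu(conv(x))
        return x.permute(0, 2, 1)     # back to (batch, block_len, channels)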
Example #5
class DEC_LargeRNN_rate2(torch.nn.Module):
    def __init__(self, args, p_array):
        super(DEC_LargeRNN_rate2, self).__init__()
        self.args = args

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.interleaver          = Interleaver(args, p_array)
        self.deinterleaver        = DeInterleaver(args, p_array)

        self.dec1_rnns      = torch.nn.ModuleList()
        self.dec2_rnns      = torch.nn.ModuleList()
        self.dec1_outputs   = torch.nn.ModuleList()
        self.dec2_outputs   = torch.nn.ModuleList()

        for idx in range(args.num_iteration):

            self.dec1_rnns.append(torch.nn.GRU(1 + args.num_iter_ft, args.dec_num_unit,
                                               num_layers=2, bias=True, batch_first=True,
                                               dropout=args.dropout, bidirectional=True))

            self.dec2_rnns.append(torch.nn.GRU(1 + args.num_iter_ft, args.dec_num_unit,
                                               num_layers=2, bias=True, batch_first=True,
                                               dropout=args.dropout, bidirectional=True))

            self.dec1_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))

            if idx == args.num_iteration - 1:
                self.dec2_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, 1))
            else:
                self.dec2_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))
Example #6
class ENC_interCNN(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(ENC_interCNN, self).__init__(args)
        self.args = args

        # Encoder

        self.enc_cnn_1 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)

        self.enc_cnn_2 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)

        self.enc_cnn_3 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)

    def set_parallel(self):
        self.enc_cnn_1 = torch.nn.DataParallel(self.enc_cnn_1)
        self.enc_cnn_2 = torch.nn.DataParallel(self.enc_cnn_2)
        self.enc_cnn_3 = torch.nn.DataParallel(self.enc_cnn_3)
        self.enc_linear_1 = torch.nn.DataParallel(self.enc_linear_1)
        self.enc_linear_2 = torch.nn.DataParallel(self.enc_linear_2)
        self.enc_linear_3 = torch.nn.DataParallel(self.enc_linear_3)

    def forward(self, inputs):
        inputs = 2.0 * inputs - 1.0
        x_sys = self.enc_cnn_1(inputs)
        x_sys = self.enc_act(self.enc_linear_1(x_sys))

        x_p1 = self.enc_cnn_2(inputs)
        x_p1 = self.enc_act(self.enc_linear_2(x_p1))

        x_sys_int = self.interleaver(inputs)

        x_p2 = self.enc_cnn_3(x_sys_int)
        x_p2 = self.enc_act(self.enc_linear_3(x_p2))

        x_tx = torch.cat([x_sys, x_p1, x_p2], dim=2)

        codes = self.power_constraint(x_tx)

        return codes
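The encoder examples subclass `ENCBase`, which supplies `enc_act` and `power_constraint`. A minimal stand-in consistent with those call sites; the tanh activation and the zero-mean/unit-power normalization are assumptions in the spirit of TurboAE-style encoders, not the repository's exact code:

import torch

class ENCBase(torch.nn.Module):
    def __init__(self, args):
        super().__init__()
        self.args = args

    def enc_act(self, inputs):
        # bound the code symbols (assumed activation)
        return torch.tanh(inputs)

    def power_constraint(self, x):
        # normalize the code block to zero mean and unit average power
        return (x - x.mean()) / x.std()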
Example #7
class ENC_interCNN2Int(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(ENC_interCNN2Int, self).__init__(args)
        self.args             = args

        # Encoder

        self.enc_cnn_1 = SameShapeConv1d(num_layer=args.enc_num_layer, in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit, kernel_size=args.dec_kernel_size)

        self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)

        self.enc_cnn_2 = SameShapeConv1d(num_layer=args.enc_num_layer, in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit, kernel_size=args.dec_kernel_size)

        self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)

        self.enc_cnn_3 = SameShapeConv1d(num_layer=args.enc_num_layer, in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit, kernel_size=args.dec_kernel_size)

        self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

        self.interleaver1 = Interleaver(args, p_array)

        # second, independent interleaver with its own fixed seed
        seed2 = 1000
        rand_gen2 = mtrand.RandomState(seed2)
        p_array2 = rand_gen2.permutation(arange(args.block_len))

        print('p_array1', p_array)
        print('p_array2', p_array2)

        self.interleaver2 = Interleaver(args, p_array2)
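The two-interleaver variant regenerates `p_array2` from `mtrand.RandomState(1000)`, so the matching decoder (`DEC_LargeCNN2Int` below) can rebuild the identical second permutation from the same fixed seed. A quick sanity check of that pattern, including the argsort inverse a deinterleaver would use:

import numpy as np
from numpy import arange
from numpy.random import mtrand

p_array2 = mtrand.RandomState(1000).permutation(arange(8))
inverse = np.argsort(p_array2)
assert (p_array2[inverse] == arange(8)).all()  # deinterleaving undoes interleaving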
Example #8
class ENC_interRNN_sys(ENCBase):
    def __init__(self, args, p_array):
        super(ENC_interRNN_sys, self).__init__(args)
        self.args = args

        # Encoder
        self.enc_rnn = torch.nn.GRU(1,
                                    args.enc_num_unit,
                                    num_layers=args.enc_num_layer,
                                    bias=True,
                                    batch_first=True,
                                    dropout=0,
                                    bidirectional=True)

        self.enc_linear = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.enc_rnn_int = torch.nn.GRU(1,
                                        args.enc_num_unit,
                                        num_layers=args.enc_num_layer,
                                        bias=True,
                                        batch_first=True,
                                        dropout=0,
                                        bidirectional=True)

        self.enc_linear_int = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)

    def set_parallel(self):
        self.enc_rnn = torch.nn.DataParallel(self.enc_rnn)
        self.enc_linear = torch.nn.DataParallel(self.enc_linear)

        self.enc_rnn_int = torch.nn.DataParallel(self.enc_rnn_int)
        self.enc_linear_int = torch.nn.DataParallel(self.enc_linear_int)

    def forward(self, inputs):

        x_sys = 2.0 * inputs - 1.0

        x_p1, _ = self.enc_rnn(inputs)
        x_p1 = self.enc_act(self.enc_linear(x_p1))

        x_sys_int = self.interleaver(inputs)

        x_p2, _ = self.enc_rnn_int(x_sys_int)
        x_p2 = self.enc_act(self.enc_linear_int(x_p2))

        x_tx = torch.cat([x_p1, x_p2], dim=2)

        x_tx = self.power_constraint(x_tx)

        codes = torch.cat([x_sys, x_tx], dim=2)

        return codes
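A systematic encoder transmits the BPSK-mapped message bits untouched and applies the power constraint only to the two parity streams. A hypothetical smoke test, reusing the `args`, `p_array`, `ENCBase`, and `Interleaver` sketches above:

u = torch.randint(0, 2, (args.batch_size, args.block_len, 1)).float()
enc = ENC_interRNN_sys(args, p_array)
codes = enc(u)
print(codes.shape)  # (batch_size, block_len, 3): systematic bits + two parity streams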
Example #9
class ENC_turbofy_rate2(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(ENC_turbofy_rate2, self).__init__(args)
        self.args = args

        # Encoder
        self.enc_rnn_1 = torch.nn.GRU(1,
                                      args.enc_num_unit,
                                      num_layers=args.enc_num_layer,
                                      bias=True,
                                      batch_first=True,
                                      dropout=0,
                                      bidirectional=True)

        self.enc_linear_1 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.enc_rnn_2 = torch.nn.GRU(1,
                                      args.enc_num_unit,
                                      num_layers=args.enc_num_layer,
                                      bias=True,
                                      batch_first=True,
                                      dropout=0,
                                      bidirectional=True)

        self.enc_linear_2 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)

    def set_parallel(self):
        self.enc_rnn_1 = torch.nn.DataParallel(self.enc_rnn_1)
        self.enc_linear_1 = torch.nn.DataParallel(self.enc_linear_1)
        self.enc_rnn_2 = torch.nn.DataParallel(self.enc_rnn_2)
        self.enc_linear_2 = torch.nn.DataParallel(self.enc_linear_2)

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)

    def forward(self, inputs):
        x_sys, _ = self.enc_rnn_1(inputs)
        x_sys = self.enc_act(self.enc_linear_1(x_sys))

        x_sys_int = self.interleaver(inputs)

        x_p2, _ = self.enc_rnn_2(x_sys_int)
        x_p2 = self.enc_act(self.enc_linear_2(x_p2))

        x_tx = torch.cat([x_sys, x_p2], dim=2)

        codes = self.power_constraint(x_tx)
        return codes
Example #10
class ENC_interRNN(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(ENC_interRNN, self).__init__(args)

        self.enc_rnns = torch.nn.ModuleList()

        self.args = args

        # Encoder

        if args.enc_rnn == 'gru':
            RNN_MODEL = torch.nn.GRU
        elif args.enc_rnn == 'lstm':
            RNN_MODEL = torch.nn.LSTM
        else:
            RNN_MODEL = torch.nn.RNN

        self.enc_rnn_1 = RNN_MODEL(1,
                                   args.enc_num_unit,
                                   num_layers=args.enc_num_layer,
                                   bias=True,
                                   batch_first=True,
                                   dropout=0,
                                   bidirectional=True)

        self.enc_linear_1 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.enc_rnn_2 = RNN_MODEL(1,
                                   args.enc_num_unit,
                                   num_layers=args.enc_num_layer,
                                   bias=True,
                                   batch_first=True,
                                   dropout=0,
                                   bidirectional=True)

        self.enc_linear_2 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.enc_rnn_3 = RNN_MODEL(1,
                                   args.enc_num_unit,
                                   num_layers=args.enc_num_layer,
                                   bias=True,
                                   batch_first=True,
                                   dropout=0,
                                   bidirectional=True)

        self.enc_linear_3 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)
Example #11
class NeuralTurbofyDec(torch.nn.Module):
    def __init__(self, args, p_array):
        super(NeuralTurbofyDec, self).__init__()

        self.args             = args

        self.interleaver          = Interleaver(args, p_array)
        self.deinterleaver        = DeInterleaver(args, p_array)

        self.dec_rnn = torch.nn.GRU(args.code_rate_n + args.num_iter_ft - 1, args.dec_num_unit,
                                    num_layers=2, bias=True, batch_first=True,
                                    dropout=args.dropout, bidirectional=True)
        self.dec_out = torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft)

        self.dec_final = torch.nn.Linear(args.num_iter_ft, 1)

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
Example #12
class TurboAE_decoder1D(torch.nn.Module):
    def __init__(self, args, p_array):
        super(TurboAE_decoder1D, self).__init__()
        self.args = args
        cuda = True if torch.cuda.is_available() else False
        self.this_device = torch.device("cuda" if cuda else "cpu")

        self.interleaver          = Interleaver(args, p_array)
        self.deinterleaver        = DeInterleaver(args, p_array)

        self.dec1_cnns      = torch.nn.ModuleList()
        self.dec2_cnns      = torch.nn.ModuleList()
        self.dec1_outputs   = torch.nn.ModuleList()
        self.dec2_outputs   = torch.nn.ModuleList()

        for idx in range(args.num_iteration):
            self.dec1_cnns.append(
                SameShapeConv1d(num_layer=args.dec_num_layer,
                                in_channels=2 + args.num_iter_ft,
                                out_channels=args.dec_num_unit,
                                kernel_size=args.dec_kernel_size))

            self.dec2_cnns.append(
                SameShapeConv1d(num_layer=args.dec_num_layer,
                                in_channels=2 + args.num_iter_ft,
                                out_channels=args.dec_num_unit,
                                kernel_size=args.dec_kernel_size))
            self.dec1_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

            if idx == args.num_iteration - 1:
                self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, args.code_rate_k))
            else:
                self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

        # CNNs for f: ftstart lifts the image channels to feature maps, ftend maps back
        self.ftstart = SameShapeConv2d(num_layer=args.dec_num_layer, in_channels=args.img_channels,
                                       out_channels=args.dec_num_unit, kernel_size=args.dec_kernel_size)
        self.ftend = SameShapeConv2d(num_layer=1, in_channels=args.dec_num_unit,
                                     out_channels=args.img_channels, kernel_size=args.dec_kernel_size)
Example #13
class CNN_encoder(torch.nn.Module):
    def __init__(self,
                 args,
                 input_size=1,
                 is_systematic_bit=False,
                 is_interleave=False,
                 p_array=[]):
        super(CNN_encoder, self).__init__()

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")
        self.args = args

        self.is_systematic_bit = is_systematic_bit
        self.is_interleave = is_interleave

        if self.is_interleave:
            self.interleaver = Interleaver(args, p_array)

        self.p_array = p_array

        # Encoder
        self.enc_cnn = SameShapeConv1d(num_layer=args.enc_num_layer,
                                       in_channels=input_size,
                                       out_channels=args.enc_num_unit,
                                       kernel_size=args.enc_kernel_size)
        self.enc_linear = torch.nn.Linear(args.enc_num_unit, 1)
Example #14
class FTAE_encoder(torch.nn.Module):
    def __init__(self, args, p_array):
        super(FTAE_encoder, self).__init__()

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.args = args
        # interleaver
        self.p_array = p_array
        self.interleaver          = Interleaver(args, p_array)

        if self.args.cnn_type == 'dense':
            CNNModel = DenseSameShapeConv1d
        else:
            CNNModel = SameShapeConv1d

        # Encoder
        self.enc_cnn_1 = CNNModel(num_layer=args.enc_num_layer, in_channels=args.code_rate_k,
                                  out_channels=args.enc_num_unit, kernel_size=args.enc_kernel_size)
        self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)

        self.enc_cnn_2 = CNNModel(num_layer=args.enc_num_layer, in_channels=args.code_rate_k + 2,
                                  out_channels=args.enc_num_unit, kernel_size=args.enc_kernel_size)
        self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)

        self.enc_cnn_3 = CNNModel(num_layer=args.enc_num_layer, in_channels=args.code_rate_k + 4,
                                  out_channels=args.enc_num_unit, kernel_size=args.enc_kernel_size)
        self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)
Example #15
class FTAE_Shareddecoder(torch.nn.Module):
    def __init__(self, args, p_array):
        super(FTAE_Shareddecoder, self).__init__()

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.args = args
        # interleaver
        self.p_array = p_array
        self.interleaver = Interleaver(args, p_array)
        self.deinterleaver = DeInterleaver(args, p_array)

        if args.cnn_type == 'dense':
            CNNModel = DenseSameShapeConv1d
        else:
            CNNModel = SameShapeConv1d

        self.dec1_cnns = CNNModel(num_layer=args.dec_num_layer,
                                  in_channels=2 + args.num_iter_ft,
                                  out_channels=args.dec_num_unit,
                                  kernel_size=args.dec_kernel_size)
        self.dec1_outputs = torch.nn.Linear(args.dec_num_unit,
                                            args.num_iter_ft)
        self.dec2_cnns = CNNModel(num_layer=args.dec_num_layer,
                                  in_channels=2 + args.num_iter_ft,
                                  out_channels=args.dec_num_unit,
                                  kernel_size=args.dec_kernel_size)
        self.dec2_outputs = torch.nn.Linear(args.dec_num_unit,
                                            args.num_iter_ft)

        self.final_outputs = torch.nn.Linear(args.num_iter_ft, 1)
Example #16
class ENC_interCNN(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(ENC_interCNN, self).__init__(args)
        self.args = args

        # Encoder
        if self.args.encoder == 'TurboAE_rate3_cnn':
            self.enc_cnn_1 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                             in_channels=args.code_rate_k,
                                             out_channels=args.enc_num_unit,
                                             kernel_size=args.enc_kernel_size)

            self.enc_cnn_2 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                             in_channels=args.code_rate_k,
                                             out_channels=args.enc_num_unit,
                                             kernel_size=args.enc_kernel_size)

            self.enc_cnn_3 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                             in_channels=args.code_rate_k,
                                             out_channels=args.enc_num_unit,
                                             kernel_size=args.enc_kernel_size)
        else:  # Dense
            self.enc_cnn_1 = DenseSameShapeConv1d(
                num_layer=args.enc_num_layer,
                in_channels=args.code_rate_k,
                out_channels=args.enc_num_unit,
                kernel_size=args.enc_kernel_size)

            self.enc_cnn_2 = DenseSameShapeConv1d(
                num_layer=args.enc_num_layer,
                in_channels=args.code_rate_k,
                out_channels=args.enc_num_unit,
                kernel_size=args.enc_kernel_size)

            self.enc_cnn_3 = DenseSameShapeConv1d(
                num_layer=args.enc_num_layer,
                in_channels=args.code_rate_k,
                out_channels=args.enc_num_unit,
                kernel_size=args.enc_kernel_size)

        self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)
        self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)
        self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)
Example #17
class DEC_LargeCNN2Int(torch.nn.Module):
    def __init__(self, args, p_array):
        super(DEC_LargeCNN2Int, self).__init__()
        self.args = args

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.interleaver1 = Interleaver(args, p_array)
        self.deinterleaver1 = DeInterleaver(args, p_array)

        seed2 = 1000
        rand_gen2 = mtrand.RandomState(seed2)
        p_array2 = rand_gen2.permutation(arange(args.block_len))

        print('p_array1 dec', p_array)
        print('p_array2 dec', p_array2)

        self.interleaver2 = Interleaver(args, p_array2)
        self.deinterleaver2 = DeInterleaver(args, p_array2)

        self.dec1_cnns = torch.nn.ModuleList()
        self.dec2_cnns = torch.nn.ModuleList()
        self.dec1_outputs = torch.nn.ModuleList()
        self.dec2_outputs = torch.nn.ModuleList()

        for idx in range(args.num_iteration):
            self.dec1_cnns.append(
                SameShapeConv1d(num_layer=args.dec_num_layer,
                                in_channels=2 + args.num_iter_ft,
                                out_channels=args.dec_num_unit,
                                kernel_size=args.dec_kernel_size))

            self.dec2_cnns.append(
                SameShapeConv1d(num_layer=args.dec_num_layer,
                                in_channels=2 + args.num_iter_ft,
                                out_channels=args.dec_num_unit,
                                kernel_size=args.dec_kernel_size))
            self.dec1_outputs.append(
                torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

            if idx == args.num_iteration - 1:
                self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, 1))
            else:
                self.dec2_outputs.append(
                    torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))
Example #18
class FTAE_decoder(torch.nn.Module):
    def __init__(self, args, p_array):
        super(FTAE_decoder, self).__init__()

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.args = args
        # interleaver
        self.p_array = p_array
        self.interleaver          = Interleaver(args, p_array)
        self.deinterleaver        = DeInterleaver(args, p_array)

        # Decoder
        self.dec1_cnns      = torch.nn.ModuleList()
        self.dec2_cnns      = torch.nn.ModuleList()
        self.dec1_outputs   = torch.nn.ModuleList()
        self.dec2_outputs   = torch.nn.ModuleList()

        if args.cnn_type == 'dense':
            CNNModel = DenseSameShapeConv1d
        else:
            CNNModel = SameShapeConv1d

        for idx in range(args.num_iteration):
            if self.args.codec == 'turboae_blockdelay_cnn':
                self.dec1_cnns.append(CNNModel(num_layer=args.dec_num_layer, in_channels=2 + args.num_iter_ft,
                                               out_channels=args.dec_num_unit, kernel_size=args.dec_kernel_size))

                self.dec2_cnns.append(CNNModel(num_layer=args.dec_num_layer, in_channels=2 + args.num_iter_ft,
                                               out_channels=args.dec_num_unit, kernel_size=args.dec_kernel_size))
                self.dec1_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

                if idx == args.num_iteration - 1:
                    self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, args.code_rate_k))
                else:
                    self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))
            else:  # RNN based
                self.dec1_cnns.append(torch.nn.GRU(2 + args.num_iter_ft, args.dec_num_unit,
                                                   num_layers=args.dec_num_layer, bias=True, batch_first=True,
                                                   dropout=0, bidirectional=True))

                self.dec2_cnns.append(torch.nn.GRU(2 + args.num_iter_ft, args.dec_num_unit,
                                                   num_layers=args.dec_num_layer, bias=True, batch_first=True,
                                                   dropout=0, bidirectional=True))
                self.dec1_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))

                if idx == args.num_iteration - 1:
                    self.dec2_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, args.code_rate_k))
                else:
                    self.dec2_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))
Example #19
class ENC_interCNN(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(ENC_interCNN, self).__init__(args)
        self.args = args

        # Encoder
        if self.args.encoder == 'TurboAE_rate3_cnn':
            self.enc_cnn_1 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                             in_channels=args.code_rate_k,
                                             out_channels=args.enc_num_unit,
                                             kernel_size=args.enc_kernel_size)

            self.enc_cnn_2 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                             in_channels=args.code_rate_k,
                                             out_channels=args.enc_num_unit,
                                             kernel_size=args.enc_kernel_size)

            self.enc_cnn_3 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                             in_channels=args.code_rate_k,
                                             out_channels=args.enc_num_unit,
                                             kernel_size=args.enc_kernel_size)
        else:  # Dense
            self.enc_cnn_1 = DenseSameShapeConv1d(
                num_layer=args.enc_num_layer,
                in_channels=args.code_rate_k,
                out_channels=args.enc_num_unit,
                kernel_size=args.enc_kernel_size)

            self.enc_cnn_2 = DenseSameShapeConv1d(
                num_layer=args.enc_num_layer,
                in_channels=args.code_rate_k,
                out_channels=args.enc_num_unit,
                kernel_size=args.enc_kernel_size)

            self.enc_cnn_3 = DenseSameShapeConv1d(
                num_layer=args.enc_num_layer,
                in_channels=args.code_rate_k,
                out_channels=args.enc_num_unit,
                kernel_size=args.enc_kernel_size)

        self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)
        self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)
        self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)

    def set_parallel(self):
        self.enc_cnn_1 = torch.nn.DataParallel(self.enc_cnn_1)
        self.enc_cnn_2 = torch.nn.DataParallel(self.enc_cnn_2)
        self.enc_cnn_3 = torch.nn.DataParallel(self.enc_cnn_3)
        self.enc_linear_1 = torch.nn.DataParallel(self.enc_linear_1)
        self.enc_linear_2 = torch.nn.DataParallel(self.enc_linear_2)
        self.enc_linear_3 = torch.nn.DataParallel(self.enc_linear_3)

    def forward(self, inputs):

        if self.args.is_variable_block_len:
            block_len = inputs.shape[1]
            # reset interleaver
            if self.args.is_interleave != 0:  # is_interleave bounds the random seed for a fresh permutation
                seed = np.random.randint(0, self.args.is_interleave)
                rand_gen = mtrand.RandomState(seed)
                p_array = rand_gen.permutation(arange(block_len))
                self.set_interleaver(p_array)

        inputs = 2.0 * inputs - 1.0
        x_sys = self.enc_cnn_1(inputs)
        x_sys = self.enc_act(self.enc_linear_1(x_sys))

        x_p1 = self.enc_cnn_2(inputs)
        x_p1 = self.enc_act(self.enc_linear_2(x_p1))

        x_sys_int = self.interleaver(inputs)
        x_p2 = self.enc_cnn_3(x_sys_int)
        x_p2 = self.enc_act(self.enc_linear_3(x_p2))

        x_tx = torch.cat([x_sys, x_p1, x_p2], dim=2)

        codes = self.power_constraint(x_tx)

        return codes
Example #20
class ENC_interRNN(ENCBase):
    def __init__(self, args, p_array):
        # turbofy only for code rate 1/3
        super(ENC_interRNN, self).__init__(args)

        self.enc_rnns = torch.nn.ModuleList()

        self.args = args

        # Encoder

        if args.enc_rnn == 'gru':
            RNN_MODEL = torch.nn.GRU
        elif args.enc_rnn == 'lstm':
            RNN_MODEL = torch.nn.LSTM
        else:
            RNN_MODEL = torch.nn.RNN

        self.enc_rnn_1 = RNN_MODEL(1,
                                   args.enc_num_unit,
                                   num_layers=args.enc_num_layer,
                                   bias=True,
                                   batch_first=True,
                                   dropout=0,
                                   bidirectional=True)

        self.enc_linear_1 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.enc_rnn_2 = RNN_MODEL(1,
                                   args.enc_num_unit,
                                   num_layers=args.enc_num_layer,
                                   bias=True,
                                   batch_first=True,
                                   dropout=0,
                                   bidirectional=True)

        self.enc_linear_2 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.enc_rnn_3 = RNN_MODEL(1,
                                   args.enc_num_unit,
                                   num_layers=args.enc_num_layer,
                                   bias=True,
                                   batch_first=True,
                                   dropout=0,
                                   bidirectional=True)

        self.enc_linear_3 = torch.nn.Linear(2 * args.enc_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)

    def set_parallel(self):
        self.enc_rnn_1 = torch.nn.DataParallel(self.enc_rnn_1)
        self.enc_rnn_2 = torch.nn.DataParallel(self.enc_rnn_2)
        self.enc_rnn_3 = torch.nn.DataParallel(self.enc_rnn_3)
        self.enc_linear_1 = torch.nn.DataParallel(self.enc_linear_1)
        self.enc_linear_2 = torch.nn.DataParallel(self.enc_linear_2)
        self.enc_linear_3 = torch.nn.DataParallel(self.enc_linear_3)

    def forward(self, inputs):

        x_sys, _ = self.enc_rnn_1(inputs)
        x_sys = self.enc_act(self.enc_linear_1(x_sys))

        x_p1, _ = self.enc_rnn_2(inputs)
        x_p1 = self.enc_act(self.enc_linear_2(x_p1))

        x_sys_int = self.interleaver(inputs)

        x_p2, _ = self.enc_rnn_3(x_sys_int)
        x_p2 = self.enc_act(self.enc_linear_3(x_p2))

        x_tx = torch.cat([x_sys, x_p1, x_p2], dim=2)

        codes = self.power_constraint(x_tx)
        return codes
Example #21
class DEC_LargeRNN_rate2(torch.nn.Module):
    def __init__(self, args, p_array):
        super(DEC_LargeRNN_rate2, self).__init__()
        self.args = args

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.interleaver          = Interleaver(args, p_array)
        self.deinterleaver        = DeInterleaver(args, p_array)

        self.dec1_rnns      = torch.nn.ModuleList()
        self.dec2_rnns      = torch.nn.ModuleList()
        self.dec1_outputs   = torch.nn.ModuleList()
        self.dec2_outputs   = torch.nn.ModuleList()

        for idx in range(args.num_iteration):

            self.dec1_rnns.append(torch.nn.GRU(1 + args.num_iter_ft, args.dec_num_unit,
                                               num_layers=2, bias=True, batch_first=True,
                                               dropout=args.dropout, bidirectional=True))

            self.dec2_rnns.append(torch.nn.GRU(1 + args.num_iter_ft, args.dec_num_unit,
                                               num_layers=2, bias=True, batch_first=True,
                                               dropout=args.dropout, bidirectional=True))

            self.dec1_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))

            if idx == args.num_iteration - 1:
                self.dec2_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, 1))
            else:
                self.dec2_outputs.append(torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)
        self.deinterleaver.set_parray(p_array)

    def set_parallel(self):
        for idx in range(self.args.num_iteration):
            self.dec1_rnns[idx] = torch.nn.DataParallel(self.dec1_rnns[idx])
            self.dec2_rnns[idx] = torch.nn.DataParallel(self.dec2_rnns[idx])
            self.dec1_outputs[idx] = torch.nn.DataParallel(self.dec1_outputs[idx])
            self.dec2_outputs[idx] = torch.nn.DataParallel(self.dec2_outputs[idx])

    def forward(self, received):

        # Turbo Decoder
        r_sys     = received[:,:,0].view((self.args.batch_size, self.args.block_len, 1))
        r_int     = received[:,:,1].view((self.args.batch_size, self.args.block_len, 1))

        prior = torch.zeros((self.args.batch_size, self.args.block_len, self.args.num_iter_ft)).to(self.this_device)

        for idx in range(self.args.num_iteration - 1):

            x_this_dec = torch.cat([r_sys,  prior], dim = 2)
            x_dec, _   = self.dec1_rnns[idx](x_this_dec)
            x_plr      = self.dec1_outputs[idx](x_dec)

            if self.args.extrinsic:
                x_plr = x_plr - prior

            x_plr_int  = self.interleaver(x_plr)

            x_this_dec = torch.cat([r_int, x_plr_int ], dim = 2)
            x_dec, _   = self.dec2_rnns[idx](x_this_dec)
            x_plr      = self.dec2_outputs[idx](x_dec)

            if self.args.extrinsic:
                x_plr = x_plr - x_plr_int

            prior      = self.deinterleaver(x_plr)

        # last round
        x_this_dec = torch.cat([r_sys,  prior], dim = 2)
        x_dec, _   = self.dec1_rnns[self.args.num_iteration - 1](x_this_dec)
        x_plr      = self.dec1_outputs[self.args.num_iteration - 1](x_dec)

        if self.args.extrinsic:
            x_plr = x_plr - prior

        x_plr_int  = self.interleaver(x_plr)

        x_this_dec = torch.cat([r_int, x_plr_int ], dim = 2)
        x_dec, _   = self.dec2_rnns[self.args.num_iteration - 1](x_this_dec)
        x_plr      = self.dec2_outputs[self.args.num_iteration - 1](x_dec)

        final      = torch.sigmoid(self.deinterleaver(x_plr))

        return final
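The decoder alternates between two soft decoders for `num_iteration` rounds, passing `num_iter_ft`-dimensional soft information through the interleaver and back; with `args.extrinsic` set, each stage subtracts its input prior so that only extrinsic information is exchanged, as in classical turbo decoding. A hypothetical smoke test with the sketches above (rate 1/2: a systematic stream plus one interleaved parity stream):

received = torch.randn(args.batch_size, args.block_len, 2)  # noisy (sys, parity) pairs
dec = DEC_LargeRNN_rate2(args, p_array)
bits_hat = dec(received)
print(bits_hat.shape)  # (batch_size, block_len, 1), sigmoid outputs in (0, 1)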
Example #22
class NeuralTurbofyDec(torch.nn.Module):
    def __init__(self, args, p_array):
        super(NeuralTurbofyDec, self).__init__()

        self.args             = args

        self.interleaver          = Interleaver(args, p_array)
        self.deinterleaver        = DeInterleaver(args, p_array)

        self.dec_rnn = torch.nn.GRU(args.code_rate_n + args.num_iter_ft - 1, args.dec_num_unit,
                                    num_layers=2, bias=True, batch_first=True,
                                    dropout=args.dropout, bidirectional=True)
        self.dec_out = torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft)

        self.dec_final = torch.nn.Linear(args.num_iter_ft, 1)

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")

    def enc_act(self, inputs):
        # Select the activation by its configured name; the flag lives on
        # args (self.enc_act is this method itself, so comparing it to a
        # string would never match).
        if self.args.enc_act == 'tanh':
            return torch.tanh(inputs)
        elif self.args.enc_act == 'elu':
            return F.elu(inputs)
        elif self.args.enc_act == 'relu':
            return F.relu(inputs)
        elif self.args.enc_act == 'selu':
            return F.selu(inputs)
        elif self.args.enc_act == 'sigmoid':
            return torch.sigmoid(inputs)
        else:
            return inputs

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)
        self.deinterleaver.set_parray(p_array)

    def forward(self, inputs):
        inputs = inputs.type(torch.FloatTensor).to(self.device)
        ##############################################################
        #
        # Neural Turbo Decoder
        #
        ##############################################################
        input_shape = inputs.shape
        # Turbo Decoder
        r_sys     = inputs[:,:,0].view((input_shape[0], self.args.block_len, 1))
        r_sys_int = self.interleaver(r_sys)
        r_par1    = inputs[:,:,1].view((input_shape[0], self.args.block_len, 1))
        r_par2    = inputs[:,:,2].view((input_shape[0], self.args.block_len, 1))

        #num_iteration,
        prior = torch.zeros((input_shape[0], self.args.block_len, self.args.num_iter_ft)).to(self.device)

        for idx in range(self.args.num_iteration - 1):

            x_this_dec = torch.cat([r_sys, r_par1, prior], dim = 2)
            x_dec, _   = self.dec_rnn(x_this_dec)
            x_plr      = self.dec_out(x_dec)

            if not self.args.extrinsic:
                x_plr = x_plr - prior

            x_plr_int  = self.interleaver(x_plr)

            x_this_dec = torch.cat([r_sys_int,r_par2, x_plr_int ], dim = 2)

            x_dec, _   = self.dec_rnn(x_this_dec)
            x_plr      = self.dec_out(x_dec)

            if not self.args.extrinsic:
                x_plr = x_plr - x_plr_int

            prior      = self.deinterleaver(x_plr)

        # last round
        x_this_dec = torch.cat([r_sys, r_par1, prior], dim = 2)
        x_dec, _   = self.dec_rnn(x_this_dec)
        x_plr      = self.dec_out(x_dec)

        if not self.args.extrinsic:
            x_plr = x_plr - prior

        x_plr_int  = self.interleaver(x_plr)

        x_this_dec = torch.cat([r_sys_int,r_par2, x_plr_int ], dim = 2)
        x_dec, _   = self.dec_rnn(x_this_dec)
        x_dec      = self.dec_out(x_dec)
        x_final    = self.dec_final(x_dec)
        x_plr      = torch.sigmoid(x_final)

        final      = self.deinterleaver(x_plr)

        return final
Example #23
class CCE_Turbo_Encoder1D(nn.Module):
    def __init__(self, args, p_array):
        super(CCE_Turbo_Encoder1D, self).__init__()
        self.args = args

        cuda = True if torch.cuda.is_available() else False
        self.this_device = torch.device("cuda" if cuda else "cpu")

        # Define 1D Network for TurboAE
        self.enc_cnn_1 = SameShapeConv1d(num_layer=args.cce_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.cce_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_1 = torch.nn.Linear(args.cce_num_unit, 1)

        self.enc_cnn_2 = SameShapeConv1d(num_layer=args.cce_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.cce_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_2 = torch.nn.Linear(args.cce_num_unit, 1)

        self.enc_cnn_3 = SameShapeConv1d(num_layer=args.cce_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.cce_num_unit,
                                         kernel_size=args.dec_kernel_size)

        self.enc_linear_3 = torch.nn.Linear(args.cce_num_unit, 1)

        self.interleaver = Interleaver(args, p_array)

        self.norm = torch.nn.BatchNorm1d(self.args.code_rate_n, affine=True)

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)

    def forward(self, u_message):
        inputs = 2.0 * u_message - 1.0
        inputs = inputs.view(self.args.batch_size, self.args.code_rate_k,
                             self.args.img_size**2).permute(0, 2, 1)

        x_sys = self.enc_cnn_1(inputs)
        x_sys = self.enc_linear_1(x_sys)

        x_p1 = self.enc_cnn_2(inputs)
        x_p1 = self.enc_linear_2(x_p1)

        x_sys_int = self.interleaver(inputs)

        x_p2 = self.enc_cnn_3(x_sys_int)
        x_p2 = self.enc_linear_3(x_p2)

        x_tx = torch.cat([x_sys, x_p1, x_p2], dim=2)
        x_tx = x_tx.permute(0, 2, 1)
        x_tx = self.norm(x_tx)

        u_tensor = x_tx.view(self.args.batch_size, self.args.code_rate_n,
                             self.args.img_size, self.args.img_size)

        return u_tensor
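Instead of a hand-written `power_constraint`, `CCE_Turbo_Encoder1D` normalizes with a learned `BatchNorm1d` over the `code_rate_n` code streams, driving each stream to roughly zero mean and unit power per batch. A small standalone check of that effect (the shapes are illustrative):

import torch

norm = torch.nn.BatchNorm1d(3, affine=True)  # one channel per code stream
x = 5.0 * torch.randn(32, 3, 64) + 2.0       # (batch, code_rate_n, positions)
y = norm(x)
print(y.mean(dim=(0, 2)))                    # ~0 per stream
print(y.var(dim=(0, 2), unbiased=False))     # ~1 per stream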
Example #24
class DEC_LargeCNN(torch.nn.Module):
    def __init__(self, args, p_array):
        super(DEC_LargeCNN, self).__init__()
        self.args = args

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.interleaver = Interleaver(args, p_array)
        self.deinterleaver = DeInterleaver(args, p_array)

        self.dec1_cnns = torch.nn.ModuleList()
        self.dec2_cnns = torch.nn.ModuleList()
        self.dec1_outputs = torch.nn.ModuleList()
        self.dec2_outputs = torch.nn.ModuleList()

        for idx in range(args.num_iteration):
            self.dec1_cnns.append(
                SameShapeConv1d(num_layer=args.dec_num_layer,
                                in_channels=2 + args.num_iter_ft,
                                out_channels=args.dec_num_unit,
                                kernel_size=args.dec_kernel_size))

            self.dec2_cnns.append(
                SameShapeConv1d(num_layer=args.dec_num_layer,
                                in_channels=2 + args.num_iter_ft,
                                out_channels=args.dec_num_unit,
                                kernel_size=args.dec_kernel_size))
            self.dec1_outputs.append(
                torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

            if idx == args.num_iteration - 1:
                self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, 1))
            else:
                self.dec2_outputs.append(
                    torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

    def set_parallel(self):
        for idx in range(self.args.num_iteration):
            self.dec1_cnns[idx] = torch.nn.DataParallel(self.dec1_cnns[idx])
            self.dec2_cnns[idx] = torch.nn.DataParallel(self.dec2_cnns[idx])
            self.dec1_outputs[idx] = torch.nn.DataParallel(
                self.dec1_outputs[idx])
            self.dec2_outputs[idx] = torch.nn.DataParallel(
                self.dec2_outputs[idx])

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)
        self.deinterleaver.set_parray(p_array)

    def forward(self, received):
        received = received.type(torch.FloatTensor).to(self.this_device)
        # Turbo Decoder
        r_sys = received[:, :, 0].view(
            (self.args.batch_size, self.args.block_len, 1))
        r_sys_int = self.interleaver(r_sys)
        r_par1 = received[:, :, 1].view(
            (self.args.batch_size, self.args.block_len, 1))
        r_par2 = received[:, :, 2].view(
            (self.args.batch_size, self.args.block_len, 1))

        #num_iteration,
        prior = torch.zeros((self.args.batch_size, self.args.block_len,
                             self.args.num_iter_ft)).to(self.this_device)

        for idx in range(self.args.num_iteration - 1):
            x_this_dec = torch.cat([r_sys, r_par1, prior], dim=2)

            x_dec = self.dec1_cnns[idx](x_this_dec)
            x_plr = self.dec1_outputs[idx](x_dec)

            if self.args.extrinsic:
                x_plr = x_plr - prior

            x_plr_int = self.interleaver(x_plr)

            x_this_dec = torch.cat([r_sys_int, r_par2, x_plr_int], dim=2)

            x_dec = self.dec2_cnns[idx](x_this_dec)

            x_plr = self.dec2_outputs[idx](x_dec)

            if self.args.extrinsic:
                x_plr = x_plr - x_plr_int

            prior = self.deinterleaver(x_plr)

        # last round
        x_this_dec = torch.cat([r_sys, r_par1, prior], dim=2)

        x_dec = self.dec1_cnns[self.args.num_iteration - 1](x_this_dec)
        x_plr = self.dec1_outputs[self.args.num_iteration - 1](x_dec)

        if self.args.extrinsic:
            x_plr = x_plr - prior

        x_plr_int = self.interleaver(x_plr)

        x_this_dec = torch.cat([r_sys_int, r_par2, x_plr_int], dim=2)

        x_dec = self.dec2_cnns[self.args.num_iteration - 1](x_this_dec)
        x_plr = self.dec2_outputs[self.args.num_iteration - 1](x_dec)

        final = torch.sigmoid(self.deinterleaver(x_plr))

        return final
Example #25
class DEC_LargeRNN(torch.nn.Module):
    def __init__(self, args, p_array):
        super(DEC_LargeRNN, self).__init__()
        self.args = args

        use_cuda = not args.no_cuda and torch.cuda.is_available()
        self.this_device = torch.device("cuda" if use_cuda else "cpu")

        self.interleaver = Interleaver(args, p_array)
        self.deinterleaver = DeInterleaver(args, p_array)

        if args.dec_rnn == 'gru':
            RNN_MODEL = torch.nn.GRU
        elif args.dec_rnn == 'lstm':
            RNN_MODEL = torch.nn.LSTM
        else:
            RNN_MODEL = torch.nn.RNN

        self.dropout = torch.nn.Dropout(args.dropout)

        self.dec1_rnns = torch.nn.ModuleList()
        self.dec2_rnns = torch.nn.ModuleList()
        self.dec1_outputs = torch.nn.ModuleList()
        self.dec2_outputs = torch.nn.ModuleList()

        for idx in range(args.num_iteration):
            self.dec1_rnns.append(
                RNN_MODEL(2 + args.num_iter_ft,
                          args.dec_num_unit,
                          num_layers=2,
                          bias=True,
                          batch_first=True,
                          dropout=args.dropout,
                          bidirectional=True))

            self.dec2_rnns.append(
                RNN_MODEL(2 + args.num_iter_ft,
                          args.dec_num_unit,
                          num_layers=2,
                          bias=True,
                          batch_first=True,
                          dropout=args.dropout,
                          bidirectional=True))

            self.dec1_outputs.append(
                torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))

            if idx == args.num_iteration - 1:
                self.dec2_outputs.append(
                    torch.nn.Linear(2 * args.dec_num_unit, 1))
            else:
                self.dec2_outputs.append(
                    torch.nn.Linear(2 * args.dec_num_unit, args.num_iter_ft))

    def dec_act(self, inputs):
        if self.args.dec_act == 'tanh':
            return F.tanh(inputs)
        elif self.args.dec_act == 'elu':
            return F.elu(inputs)
        elif self.args.dec_act == 'relu':
            return F.relu(inputs)
        elif self.args.dec_act == 'selu':
            return F.selu(inputs)
        elif self.args.dec_act == 'sigmoid':
            return F.sigmoid(inputs)
        elif self.args.dec_act == 'linear':
            return inputs
        else:
            return inputs

    def set_parallel(self):
        for idx in range(self.args.num_iteration):
            self.dec1_rnns[idx] = torch.nn.DataParallel(self.dec1_rnns[idx])
            self.dec2_rnns[idx] = torch.nn.DataParallel(self.dec2_rnns[idx])
            self.dec1_outputs[idx] = torch.nn.DataParallel(
                self.dec1_outputs[idx])
            self.dec2_outputs[idx] = torch.nn.DataParallel(
                self.dec2_outputs[idx])

    def set_interleaver(self, p_array):
        self.interleaver.set_parray(p_array)
        self.deinterleaver.set_parray(p_array)

    def forward(self, received):
        received = received.type(torch.FloatTensor).to(self.this_device)
        # Turbo Decoder
        r_sys = received[:, :, 0].view(
            (self.args.batch_size, self.args.block_len, 1))
        r_sys_int = self.interleaver(r_sys)
        r_par1 = received[:, :, 1].view(
            (self.args.batch_size, self.args.block_len, 1))
        r_par2 = received[:, :, 2].view(
            (self.args.batch_size, self.args.block_len, 1))

        #num_iteration,
        prior = torch.zeros((self.args.batch_size, self.args.block_len,
                             self.args.num_iter_ft)).to(self.this_device)

        for idx in range(self.args.num_iteration - 1):
            x_this_dec = torch.cat([r_sys, r_par1, prior], dim=2)

            if self.args.is_parallel:
                self.dec1_rnns[idx].module.flatten_parameters()
            x_dec, _ = self.dec1_rnns[idx](x_this_dec)
            x_plr = self.dec_act(self.dropout(self.dec1_outputs[idx](x_dec)))

            if self.args.extrinsic:
                x_plr = x_plr - prior

            x_plr_int = self.interleaver(x_plr)

            x_this_dec = torch.cat([r_sys_int, r_par2, x_plr_int], dim=2)

            if self.args.is_parallel:
                self.dec2_rnns[idx].module.flatten_parameters()
            x_dec, _ = self.dec2_rnns[idx](x_this_dec)
            x_plr = self.dec_act(self.dropout(self.dec2_outputs[idx](x_dec)))

            if self.args.extrinsic:
                x_plr = x_plr - x_plr_int

            prior = self.deinterleaver(x_plr)

        # last round
        x_this_dec = torch.cat([r_sys, r_par1, prior], dim=2)

        if self.args.is_parallel:
            self.dec1_rnns[self.args.num_iteration -
                           1].module.flatten_parameters()

        x_dec, _ = self.dec1_rnns[self.args.num_iteration - 1](x_this_dec)
        x_plr = self.dec_act(
            self.dropout(self.dec1_outputs[self.args.num_iteration -
                                           1](x_dec)))

        if self.args.extrinsic:
            x_plr = x_plr - prior

        x_plr_int = self.interleaver(x_plr)

        x_this_dec = torch.cat([r_sys_int, r_par2, x_plr_int], dim=2)

        if self.args.is_parallel:
            self.dec2_rnns[self.args.num_iteration -
                           1].module.flatten_parameters()

        x_dec, _ = self.dec2_rnns[self.args.num_iteration - 1](x_this_dec)

        x_plr = self.dec_act(
            self.dropout(self.dec2_outputs[self.args.num_iteration -
                                           1](x_dec)))

        logit = self.deinterleaver(x_plr)

        final = torch.sigmoid(logit)

        return final
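Putting the pieces together: a hypothetical end-to-end pass through an encoder/decoder pair over an AWGN channel, reusing the sketches above. The extra `args` fields and the noise level are arbitrary choices for illustration:

args.encoder = 'TurboAE_rate3_cnn'   # select the plain-CNN branch of ENC_interCNN
args.is_variable_block_len = False

enc = ENC_interCNN(args, p_array)    # rate-1/3 CNN encoder
dec = DEC_LargeCNN(args, p_array)    # matching iterative CNN decoder

u = torch.randint(0, 2, (args.batch_size, args.block_len, 1)).float()
codes = enc(u)                                    # (batch, block_len, 3)
received = codes + 0.5 * torch.randn_like(codes)  # AWGN channel
u_hat = dec(received)                             # bit probabilities in (0, 1)
ber = ((u_hat > 0.5).float() != u).float().mean()
print('BER before training:', ber.item())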