def __init__(self, args, p_array):
    # Turbo-style CNN encoder; only supports code rate 1/3.
    super(CNN_encoder_rate3, self).__init__(args)
    self.args = args

    # Encoder: three parallel CNN branches, each followed by a 1-unit linear head.
    self.enc_cnn_1 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.enc_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)

    self.enc_cnn_2 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.enc_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)

    self.enc_cnn_3 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.enc_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

    # Interleaver feeding the third (turbo) branch.
    self.interleaver = Interleaver(args, p_array)
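# SameShapeConv1d is not shown in this excerpt. The sketch below is a minimal,
# hypothetical implementation consistent with how it is used throughout this
# file: a stack of length-preserving ('same'-padded) Conv1d layers with ELU
# activations, operating on (batch, length, channels) tensors. The `no_act`
# flag (used by DeModulation below) disables the activations. Odd kernel sizes
# are assumed; this is an illustration, not the repository's own code.
import torch
import torch.nn.functional as F

class SameShapeConv1d(torch.nn.Module):
    def __init__(self, num_layer, in_channels, out_channels, kernel_size, no_act=False):
        super(SameShapeConv1d, self).__init__()
        self.no_act = no_act
        self.cnns = torch.nn.ModuleList()
        for idx in range(num_layer):
            self.cnns.append(torch.nn.Conv1d(
                in_channels=in_channels if idx == 0 else out_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                padding=kernel_size // 2))  # keeps the sequence length unchanged

    def forward(self, inputs):
        x = inputs.transpose(1, 2)   # (batch, channels, length) for Conv1d
        for conv in self.cnns:
            x = conv(x)
            if not self.no_act:
                x = F.elu(x)
        return x.transpose(1, 2)     # back to (batch, length, channels)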
def __init__(self, args, p_array):
    super(TurboAE_decoder1D, self).__init__()
    self.args = args

    use_cuda = torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")

    self.interleaver = Interleaver(args, p_array)
    self.deinterleaver = DeInterleaver(args, p_array)

    # One pair of decoder CNNs (with linear heads) per turbo iteration.
    self.dec1_cnns = torch.nn.ModuleList()
    self.dec2_cnns = torch.nn.ModuleList()
    self.dec1_outputs = torch.nn.ModuleList()
    self.dec2_outputs = torch.nn.ModuleList()

    for idx in range(args.num_iteration):
        self.dec1_cnns.append(SameShapeConv1d(num_layer=args.dec_num_layer,
                                              in_channels=2 + args.num_iter_ft,
                                              out_channels=args.dec_num_unit,
                                              kernel_size=args.dec_kernel_size))
        self.dec2_cnns.append(SameShapeConv1d(num_layer=args.dec_num_layer,
                                              in_channels=2 + args.num_iter_ft,
                                              out_channels=args.dec_num_unit,
                                              kernel_size=args.dec_kernel_size))
        self.dec1_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

        if idx == args.num_iteration - 1:
            # The last iteration outputs the information-bit estimates.
            self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, args.code_rate_k))
        else:
            self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

    # 2D CNNs for the image-domain feature transform f.
    self.ftstart = SameShapeConv2d(num_layer=args.dec_num_layer,
                                   in_channels=args.img_channels,
                                   out_channels=args.dec_num_unit,
                                   kernel_size=args.dec_kernel_size)
    self.ftend = SameShapeConv2d(num_layer=1,
                                 in_channels=args.dec_num_unit,
                                 out_channels=args.img_channels,
                                 kernel_size=args.dec_kernel_size)
def __init__(self, args, p_array):
    super(CCE_Turbo_Encoder1D, self).__init__()
    self.args = args

    use_cuda = torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")

    # 1D network for TurboAE: three parallel CNN branches with 1-unit linear heads.
    self.enc_cnn_1 = SameShapeConv1d(num_layer=args.cce_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.cce_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_1 = torch.nn.Linear(args.cce_num_unit, 1)

    self.enc_cnn_2 = SameShapeConv1d(num_layer=args.cce_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.cce_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_2 = torch.nn.Linear(args.cce_num_unit, 1)

    self.enc_cnn_3 = SameShapeConv1d(num_layer=args.cce_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.cce_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_3 = torch.nn.Linear(args.cce_num_unit, 1)

    self.interleaver = Interleaver(args, p_array)

    # BatchNorm over the n coded streams.
    self.norm = torch.nn.BatchNorm1d(self.args.code_rate_n, affine=True)
def __init__(self, args, p_array):
    super(DEC_LargeCNN, self).__init__()
    self.args = args

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")

    self.interleaver = Interleaver(args, p_array)
    self.deinterleaver = DeInterleaver(args, p_array)

    # One pair of decoder CNNs (with linear heads) per turbo iteration.
    self.dec1_cnns = torch.nn.ModuleList()
    self.dec2_cnns = torch.nn.ModuleList()
    self.dec1_outputs = torch.nn.ModuleList()
    self.dec2_outputs = torch.nn.ModuleList()

    for idx in range(args.num_iteration):
        self.dec1_cnns.append(SameShapeConv1d(num_layer=args.dec_num_layer,
                                              in_channels=2 + args.num_iter_ft,
                                              out_channels=args.dec_num_unit,
                                              kernel_size=args.dec_kernel_size))
        self.dec2_cnns.append(SameShapeConv1d(num_layer=args.dec_num_layer,
                                              in_channels=2 + args.num_iter_ft,
                                              out_channels=args.dec_num_unit,
                                              kernel_size=args.dec_kernel_size))
        self.dec1_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))

        if idx == args.num_iteration - 1:
            # The last iteration outputs one bit estimate per position.
            self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, 1))
        else:
            self.dec2_outputs.append(torch.nn.Linear(args.dec_num_unit, args.num_iter_ft))
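# The modules above imply an iterative (turbo-style) decoding loop. The sketch
# below is a hypothetical forward pass consistent with DEC_LargeCNN's layers;
# the variable names are illustrative, and the extrinsic-information
# subtraction used by some variants is omitted.
def forward(self, received):
    # Split the rate-1/3 stream: systematic bits, parity 1, parity 2
    # (the third stream was encoded from interleaved bits).
    r_sys  = received[:, :, 0:1]
    r_par1 = received[:, :, 1:2]
    r_par2 = received[:, :, 2:3]
    r_sys_int = self.interleaver(r_sys)

    # Soft information exchanged between the two decoders across iterations.
    prior = torch.zeros(received.size(0), received.size(1),
                        self.args.num_iter_ft, device=self.this_device)

    for idx in range(self.args.num_iteration):
        # Decoder 1 works in the natural bit order.
        x = self.dec1_outputs[idx](self.dec1_cnns[idx](
            torch.cat([r_sys, r_par1, prior], dim=2)))
        x_int = self.interleaver(x)

        # Decoder 2 works in the interleaved order.
        x = self.dec2_outputs[idx](self.dec2_cnns[idx](
            torch.cat([r_sys_int, r_par2, x_int], dim=2)))
        prior = self.deinterleaver(x)

    # On the last iteration dec2_outputs maps to 1 unit, so `prior`
    # holds the per-position bit logits.
    return torch.sigmoid(prior)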
def __init__(self, args, p_array):
    # Turbo-style CNN encoder with two interleavers; only supports code rate 1/3.
    super(ENC_interCNN2Int, self).__init__(args)
    self.args = args

    # Encoder: three parallel CNN branches, each with a 1-unit linear head.
    self.enc_cnn_1 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.enc_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)

    self.enc_cnn_2 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.enc_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)

    self.enc_cnn_3 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                     in_channels=args.code_rate_k,
                                     out_channels=args.enc_num_unit,
                                     kernel_size=args.dec_kernel_size)
    self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

    self.interleaver1 = Interleaver(args, p_array)

    # Second interleaver with its own fixed, seeded permutation
    # (requires `from numpy import arange` and `from numpy.random import mtrand`).
    seed2 = 1000
    rand_gen2 = mtrand.RandomState(seed2)
    p_array2 = rand_gen2.permutation(arange(args.block_len))

    print('p_array1', p_array)
    print('p_array2', p_array2)

    self.interleaver2 = Interleaver(args, p_array2)
def __init__(self, args):
    super(DeModulation, self).__init__()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")
    self.args = args

    self.demod_layer = SameShapeConv1d(num_layer=args.demod_num_layer,
                                       in_channels=2,
                                       out_channels=self.args.demod_num_unit,
                                       kernel_size=1)
    self.demod_final = SameShapeConv1d(num_layer=1,
                                       in_channels=args.demod_num_unit,
                                       out_channels=args.mod_rate,
                                       kernel_size=1,
                                       no_act=True)
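# With kernel_size=1, both layers above act per symbol (no mixing across time
# steps). A hypothetical forward pass, mapping each received symbol to
# args.mod_rate soft values; the input shape is an assumption of this sketch:
def forward(self, symbols):
    # symbols: (batch, num_symbols, 2) -- real and imaginary parts.
    return self.demod_final(self.demod_layer(symbols))  # (batch, num_symbols, mod_rate)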
def __init__(self, args, input_size=1, is_systematic_bit=False,
             is_interleave=False, p_array=None):
    # p_array defaults to None (avoiding a mutable default argument); it is
    # only needed when is_interleave is True.
    super(CNN_encoder, self).__init__()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")

    self.args = args
    self.is_systematic_bit = is_systematic_bit
    self.is_interleave = is_interleave
    if self.is_interleave:
        self.interleaver = Interleaver(args, p_array)
        self.p_array = p_array

    # Encoder: single CNN branch with a 1-unit linear head.
    self.enc_cnn = SameShapeConv1d(num_layer=args.enc_num_layer,
                                   in_channels=input_size,
                                   out_channels=args.enc_num_unit,
                                   kernel_size=args.enc_kernel_size)
    self.enc_linear = torch.nn.Linear(args.enc_num_unit, 1)
def __init__(self, args, p_array):
    # Turbo-style CNN encoder; only supports code rate 1/3.
    super(ENC_interCNN, self).__init__(args)
    self.args = args

    # Encoder: three parallel branches, plain or densely connected CNNs.
    if self.args.encoder == 'TurboAE_rate3_cnn':
        self.enc_cnn_1 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.enc_kernel_size)
        self.enc_cnn_2 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.enc_kernel_size)
        self.enc_cnn_3 = SameShapeConv1d(num_layer=args.enc_num_layer,
                                         in_channels=args.code_rate_k,
                                         out_channels=args.enc_num_unit,
                                         kernel_size=args.enc_kernel_size)
    else:  # DenseNet-style variant
        self.enc_cnn_1 = DenseSameShapeConv1d(num_layer=args.enc_num_layer,
                                              in_channels=args.code_rate_k,
                                              out_channels=args.enc_num_unit,
                                              kernel_size=args.enc_kernel_size)
        self.enc_cnn_2 = DenseSameShapeConv1d(num_layer=args.enc_num_layer,
                                              in_channels=args.code_rate_k,
                                              out_channels=args.enc_num_unit,
                                              kernel_size=args.enc_kernel_size)
        self.enc_cnn_3 = DenseSameShapeConv1d(num_layer=args.enc_num_layer,
                                              in_channels=args.code_rate_k,
                                              out_channels=args.enc_num_unit,
                                              kernel_size=args.enc_kernel_size)

    self.enc_linear_1 = torch.nn.Linear(args.enc_num_unit, 1)
    self.enc_linear_2 = torch.nn.Linear(args.enc_num_unit, 1)
    self.enc_linear_3 = torch.nn.Linear(args.enc_num_unit, 1)

    self.interleaver = Interleaver(args, p_array)
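# A hypothetical forward pass implied by ENC_interCNN's layers: two branches
# encode the bits in natural order, the third encodes the interleaved bits, as
# in a classical turbo code. Any activation or power normalization applied by
# the (unshown) base class is omitted; this is an illustration only.
def forward(self, inputs):
    # inputs: (batch, block_len, code_rate_k)
    x_sys = self.enc_linear_1(self.enc_cnn_1(inputs))
    x_p1  = self.enc_linear_2(self.enc_cnn_2(inputs))
    x_p2  = self.enc_linear_3(self.enc_cnn_3(self.interleaver(inputs)))
    # Three parallel streams -> overall code rate k / 3k = 1/3.
    return torch.cat([x_sys, x_p1, x_p2], dim=2)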
def __init__(self, args):
    super(CNN_decoder, self).__init__()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    self.this_device = torch.device("cuda" if use_cuda else "cpu")
    self.args = args

    self.dec_cnn = SameShapeConv1d(num_layer=args.dec_num_layer,
                                   in_channels=args.code_rate_n,
                                   out_channels=args.dec_num_unit,
                                   kernel_size=args.dec_kernel_size)
    self.dec_output = torch.nn.Linear(args.dec_num_unit, args.code_rate_k)
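# Minimal usage sketch for CNN_decoder. All argument values below are
# illustrative placeholders, not values from the source.
from types import SimpleNamespace
import torch

args = SimpleNamespace(no_cuda=True, dec_num_layer=2, dec_num_unit=100,
                       dec_kernel_size=5, code_rate_n=3, code_rate_k=1)
dec = CNN_decoder(args)

received = torch.randn(32, 100, args.code_rate_n)  # (batch, block_len, n)
u_hat = dec.dec_output(dec.dec_cnn(received))      # (batch, block_len, k)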