def add_conv_module(conv_net,
                    in_channels,
                    out_channels,
                    kernel_size,
                    stride,
                    padding,
                    batch_norm,
                    conv_bias,
                    activation,
                    leakyrelu_const,
                    layer,
                    embed_cube_edge):
    """Append one Conv2d (+ optional BatchNorm2d and activation) to conv_net.

    Modules are registered as "<Type>_<layer>" so each layer keeps a unique
    key inside the nn.Sequential container.

    Parameters
    ----------
    conv_net : nn.Sequential being built; mutated in place.
    in_channels, out_channels : channel counts for the Conv2d.
    kernel_size, stride, padding : Conv2d geometry arguments.
    batch_norm : when truthy, add a BatchNorm2d after the convolution.
    conv_bias : bias flag forwarded to Conv2d.
    activation : one of "leakyrelu", "relu", "tanh", "sigmoid"; any other
        value adds no activation module.
    leakyrelu_const : negative slope used when activation == "leakyrelu".
    layer : 1-based layer index used in the module names.
    embed_cube_edge : spatial edge length of the incoming feature map.

    Returns
    -------
    (conv_net, channels, layer, embed_cube_edge) : the mutated container,
    the new channel count, the next layer index, and the post-conv edge
    length (computed via calculate_conv_output_dim).
    """
    # Convolution Module
    conv_net.add_module("Conv_{0}".format(layer),
                        nn.Conv2d(in_channels,
                                  out_channels,
                                  kernel_size=kernel_size,
                                  stride=stride,
                                  padding=padding,
                                  bias=conv_bias))

    # Batch Norm Module (was "batch_norm == True"; truthiness is idiomatic)
    if batch_norm:
        conv_net.add_module("BatchNorm_{0}".format(layer),
                            nn.BatchNorm2d(out_channels))

    # Activation Module — unrecognized names are silently skipped.
    if activation == "leakyrelu":
        conv_net.add_module("leakyrelu_{0}".format(layer),
                            nn.LeakyReLU(leakyrelu_const, inplace=True))
    elif activation == "relu":
        conv_net.add_module("relu_{0}".format(layer),
                            nn.ReLU(inplace=True))
    elif activation == "tanh":
        conv_net.add_module("tanh_{0}".format(layer),
                            nn.Tanh())
    elif activation == "sigmoid":
        conv_net.add_module("sigmoid_{0}".format(layer),
                            nn.Sigmoid())

    # Track the spatial size produced by this convolution.
    embed_cube_edge = calculate_conv_output_dim(D=embed_cube_edge,
                                                K=kernel_size,
                                                P=padding,
                                                S=stride)
    channels = out_channels
    layer = layer + 1
    print("Cube Edge out = " + str(embed_cube_edge))

    return conv_net, channels, layer, embed_cube_edge
    
    
# Example #2
    def __init__(self,
                 kernel_size = 4,
                 stride = 2,
                 padding = 1,
                 full_conv_limit = 4,
                 just_conv_limit = 1,
                 cube_edge = 128,
                 ch_mult = 2,
                 conv_bias = False,
                 leakyrelu_const = 0.01):
        """Build the 3-D convolutional encoder.

        input:  batch_size * channels * cube_edge * cube_edge * cube_edge
        output: batch_size * (channel_multiplier * channels)

        Layers 1..full_conv_limit each get Conv3d + BatchNorm3d + LeakyReLU;
        any further layers up to just_conv_limit get a bare Conv3d.
        Conv3D arguments = in_channels, out_channels, kernel_size, stride,
        padding.
        """
        super(Encoder, self).__init__()
        print("Encoder = encoder_v02.py")

        self.embed_cube_edge = cube_edge

        conv_net = nn.Sequential()
        channels = 1

        # Convolutions with BatchNorm and LeakyReLU activation.
        for layer in range(1, full_conv_limit + 1):
            conv_net.add_module(
                "Conv_{0}".format(layer),
                nn.Conv3d(channels,
                          channels * ch_mult,
                          kernel_size = kernel_size,
                          stride = stride,
                          padding = padding,
                          bias = conv_bias))
            conv_net.add_module("BatchNorm_{0}".format(layer),
                                nn.BatchNorm3d(channels * ch_mult))
            conv_net.add_module("leakyrelu_{0}".format(layer),
                                nn.LeakyReLU(leakyrelu_const, inplace = True))
            # Each pass shrinks the cube edge and multiplies the channels.
            self.embed_cube_edge = calculate_conv_output_dim(
                D = self.embed_cube_edge,
                K = kernel_size,
                P = padding,
                S = stride)
            channels = channels * ch_mult

        # Plain convolutions (no BatchNorm) for layer indices up to
        # just_conv_limit; with the default arguments this range is empty.
        for layer in range(full_conv_limit + 1, just_conv_limit + 1):
            conv_net.add_module(
                "Conv_{0}".format(layer),
                nn.Conv3d(channels,
                          channels * ch_mult,
                          kernel_size = kernel_size,
                          stride = stride,
                          padding = padding,
                          bias = conv_bias))
            channels = channels * ch_mult

        self.conv_net = conv_net
        self.channels = channels
        # NOTE(review): this extra edge-size update accounts for one more
        # convolution than is actually added above (a final conv layer
        # appears to have been removed) — confirm the intended geometry.
        self.embed_cube_edge = calculate_conv_output_dim(
            D = self.embed_cube_edge,
            K = kernel_size,
            P = padding,
            S = stride)
# Example #3
    def __init__(self, cube_edge_len, fc1_hidden_dim, fc2_output_dim,
                 embedding_dim, leakyrelu_const, pool_return_indices):
        """Encoder: seven Conv3d + BatchNorm3d + LeakyReLU blocks (with an
        AvgPool3d after each of the first three convolutions) followed by
        three fully connected layers ending in an embedding.

        Channels double at every convolution (1 -> 2 -> ... -> 128); the
        spatial edge shrinks per the printed dimension trace.

        Parameters
        ----------
        cube_edge_len : edge length of the cubic input volume.
        fc1_hidden_dim : output width of the first fully connected layer.
        fc2_output_dim : output width of the second fully connected layer.
        embedding_dim : width of the final embedding (FC3 output).
        leakyrelu_const : negative slope shared by every LeakyReLU module.
        pool_return_indices : stored on self; only relevant to the
            commented-out MaxPool3d variants kept below for reference.
        """
        super(Encoder, self).__init__()

        self.pool_return_indices = pool_return_indices
        """
        Convolutional Layers
        """
        # First Convolutional Layer
        self.conv1_in_channels = 1
        print("Conv1 Input Channel = " + str(self.conv1_in_channels))
        self.conv1_out_channels = self.conv1_in_channels * 2
        print("Conv1 Output Channel = " + str(self.conv1_out_channels))
        self.conv1_kernel = 3
        self.conv1_stride = 1
        self.conv1_padding = 0
        conv1_output_dim = calculate_conv_output_dim(D=cube_edge_len,
                                                     K=self.conv1_kernel,
                                                     P=self.conv1_padding,
                                                     S=self.conv1_stride)
        print("Conv1 Output Dimension = " + str(conv1_output_dim))
        self.conv1_encode = nn.Conv3d(in_channels=self.conv1_in_channels,
                                      out_channels=self.conv1_out_channels,
                                      kernel_size=self.conv1_kernel,
                                      stride=self.conv1_stride,
                                      padding=self.conv1_padding)
        # Xavier initialization is applied to every conv weight below.
        nn.init.xavier_uniform_(self.conv1_encode.weight)
        self.bn1_encode = nn.BatchNorm3d(num_features=self.conv1_out_channels)
        self.leakyrelu1 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # First Pooling (average pooling; MaxPool variant kept commented)
        self.pool1_kernel = 2
        self.pool1_stride = 2
        pool1_output_dim = calculate_pool_output_dim(D=conv1_output_dim,
                                                     K=self.pool1_kernel,
                                                     S=self.pool1_stride)
        print("Pool1 Output Dimension = " + str(pool1_output_dim))
        #         self.pool1_encode = nn.MaxPool3d(kernel_size=self.pool1_kernel,
        #                                             stride=self.pool1_stride,
        #                                             return_indices = self.pool_return_indices)
        self.pool1_encode = nn.AvgPool3d(kernel_size=self.pool1_kernel,
                                         stride=self.pool1_stride,
                                         padding=0,
                                         ceil_mode=False,
                                         count_include_pad=True)

        # Second Convolutional Layer
        self.conv2_in_channels = self.conv1_out_channels
        self.conv2_out_channels = self.conv2_in_channels * 2
        print("Conv2 Output Channel = " + str(self.conv2_out_channels))
        self.conv2_kernel = 4
        self.conv2_stride = 1
        self.conv2_padding = 0
        conv2_output_dim = calculate_conv_output_dim(D=pool1_output_dim,
                                                     K=self.conv2_kernel,
                                                     P=self.conv2_padding,
                                                     S=self.conv2_stride)
        print("Conv2 Output Dimension= " + str(conv2_output_dim))
        self.conv2_encode = nn.Conv3d(in_channels=self.conv2_in_channels,
                                      out_channels=self.conv2_out_channels,
                                      kernel_size=self.conv2_kernel,
                                      stride=self.conv2_stride,
                                      padding=self.conv2_padding)
        nn.init.xavier_uniform_(self.conv2_encode.weight)
        self.bn2_encode = nn.BatchNorm3d(num_features=self.conv2_out_channels)
        self.leakyrelu2 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # Second Pooling
        self.pool2_kernel = 2
        self.pool2_stride = 2
        pool2_output_dim = calculate_pool_output_dim(D=conv2_output_dim,
                                                     K=self.pool2_kernel,
                                                     S=self.pool2_stride)
        print("Pool2 Output Dimension = " + str(pool2_output_dim))
        #         self.pool2_encode = nn.MaxPool3d(kernel_size=self.pool2_kernel,
        #                                             stride=self.pool2_stride,
        #                                             return_indices = self.pool_return_indices)
        self.pool2_encode = nn.AvgPool3d(kernel_size=self.pool2_kernel,
                                         stride=self.pool2_stride,
                                         padding=0,
                                         ceil_mode=False,
                                         count_include_pad=True)

        # Third Convolutional Layer
        self.conv3_in_channels = self.conv2_out_channels
        self.conv3_out_channels = self.conv3_in_channels * 2
        print("Conv3 Output Channel = " + str(self.conv3_out_channels))
        self.conv3_kernel = 3
        self.conv3_stride = 1
        self.conv3_padding = 0
        conv3_output_dim = calculate_conv_output_dim(D=pool2_output_dim,
                                                     K=self.conv3_kernel,
                                                     P=self.conv3_padding,
                                                     S=self.conv3_stride)
        print("Conv3 Output Dimension= " + str(conv3_output_dim))
        self.conv3_encode = nn.Conv3d(in_channels=self.conv3_in_channels,
                                      out_channels=self.conv3_out_channels,
                                      kernel_size=self.conv3_kernel,
                                      stride=self.conv3_stride,
                                      padding=self.conv3_padding)
        nn.init.xavier_uniform_(self.conv3_encode.weight)
        self.bn3_encode = nn.BatchNorm3d(num_features=self.conv3_out_channels)
        self.leakyrelu3 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # Third Pooling (last pooling stage; conv4 strides instead)
        self.pool3_kernel = 2
        self.pool3_stride = 2
        pool3_output_dim = calculate_pool_output_dim(D=conv3_output_dim,
                                                     K=self.pool3_kernel,
                                                     S=self.pool3_stride)
        print("Pool3 Output Dimension = " + str(pool3_output_dim))
        #         self.pool3_encode = nn.MaxPool3d(kernel_size=self.pool3_kernel,
        #                                             stride=self.pool3_stride,
        #                                             return_indices = self.pool_return_indices)
        self.pool3_encode = nn.AvgPool3d(kernel_size=self.pool3_kernel,
                                         stride=self.pool3_stride,
                                         padding=0,
                                         ceil_mode=False,
                                         count_include_pad=True)

        # Fourth Convolutional Layer (stride 2 downsamples in place of a pool)
        self.conv4_in_channels = self.conv3_out_channels
        self.conv4_out_channels = self.conv4_in_channels * 2
        print("Conv4 Output Channel = " + str(self.conv4_out_channels))
        self.conv4_kernel = 4
        self.conv4_stride = 2
        self.conv4_padding = 0
        conv4_output_dim = calculate_conv_output_dim(D=pool3_output_dim,
                                                     K=self.conv4_kernel,
                                                     P=self.conv4_padding,
                                                     S=self.conv4_stride)
        print("Conv4 Output Dimension= " + str(conv4_output_dim))
        self.conv4_encode = nn.Conv3d(in_channels=self.conv4_in_channels,
                                      out_channels=self.conv4_out_channels,
                                      kernel_size=self.conv4_kernel,
                                      stride=self.conv4_stride,
                                      padding=self.conv4_padding)
        nn.init.xavier_uniform_(self.conv4_encode.weight)
        self.bn4_encode = nn.BatchNorm3d(num_features=self.conv4_out_channels)
        self.leakyrelu4 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # Fifth Convolutional Layer
        self.conv5_in_channels = self.conv4_out_channels
        self.conv5_out_channels = self.conv5_in_channels * 2
        print("Conv5 Output Channel = " + str(self.conv5_out_channels))
        self.conv5_kernel = 3
        self.conv5_stride = 1
        self.conv5_padding = 0
        conv5_output_dim = calculate_conv_output_dim(D=conv4_output_dim,
                                                     K=self.conv5_kernel,
                                                     P=self.conv5_padding,
                                                     S=self.conv5_stride)
        print("Conv5 Output Dimension= " + str(conv5_output_dim))
        self.conv5_encode = nn.Conv3d(in_channels=self.conv5_in_channels,
                                      out_channels=self.conv5_out_channels,
                                      kernel_size=self.conv5_kernel,
                                      stride=self.conv5_stride,
                                      padding=self.conv5_padding)
        nn.init.xavier_uniform_(self.conv5_encode.weight)
        self.bn5_encode = nn.BatchNorm3d(num_features=self.conv5_out_channels)
        self.leakyrelu5 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # Sixth Convolutional Layer
        self.conv6_in_channels = self.conv5_out_channels
        self.conv6_out_channels = self.conv6_in_channels * 2
        print("Conv6 Output Channel = " + str(self.conv6_out_channels))
        self.conv6_kernel = 2
        self.conv6_stride = 1
        self.conv6_padding = 0
        conv6_output_dim = calculate_conv_output_dim(D=conv5_output_dim,
                                                     K=self.conv6_kernel,
                                                     P=self.conv6_padding,
                                                     S=self.conv6_stride)
        print("Conv6 Output Dimension= " + str(conv6_output_dim))
        self.conv6_encode = nn.Conv3d(in_channels=self.conv6_in_channels,
                                      out_channels=self.conv6_out_channels,
                                      kernel_size=self.conv6_kernel,
                                      stride=self.conv6_stride,
                                      padding=self.conv6_padding)
        nn.init.xavier_uniform_(self.conv6_encode.weight)
        self.bn6_encode = nn.BatchNorm3d(num_features=self.conv6_out_channels)
        self.leakyrelu6 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # 7th Convolutional Layer
        self.conv7_in_channels = self.conv6_out_channels
        self.conv7_out_channels = self.conv7_in_channels * 2
        print("Conv7 Output Channel = " + str(self.conv7_out_channels))
        self.conv7_kernel = 2
        self.conv7_stride = 1
        self.conv7_padding = 0
        conv7_output_dim = calculate_conv_output_dim(D=conv6_output_dim,
                                                     K=self.conv7_kernel,
                                                     P=self.conv7_padding,
                                                     S=self.conv7_stride)
        print("Conv7 Output Dimension= " + str(conv7_output_dim))
        self.conv7_encode = nn.Conv3d(in_channels=self.conv7_in_channels,
                                      out_channels=self.conv7_out_channels,
                                      kernel_size=self.conv7_kernel,
                                      stride=self.conv7_stride,
                                      padding=self.conv7_padding)
        nn.init.xavier_uniform_(self.conv7_encode.weight)
        self.bn7_encode = nn.BatchNorm3d(num_features=self.conv7_out_channels)
        #         self.relu7 = nn.ReLU(inplace=True)
        self.leakyrelu7 = nn.LeakyReLU(leakyrelu_const, inplace=True)
        """
        Fully Connected Layers
        """
        #        1st FC Layer
        #         in_features = output channels from convolution x the cube size of the convolution output
        self.fc1_in_features = self.conv7_out_channels * conv7_output_dim**3
        print("FC1 Input Dimension= " + str(self.fc1_in_features))
        self.fc1_encode = nn.Linear(in_features=self.fc1_in_features,
                                    out_features=fc1_hidden_dim)
        self.leakyrelu8 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # 2nd FC Layer
        print("FC2 Input Dimension= " + str(fc1_hidden_dim))
        self.fc2_encode = nn.Linear(in_features=fc1_hidden_dim,
                                    out_features=fc2_output_dim)
        self.leakyrelu9 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # 3rd FC Layer (produces the embedding; note this one uses ReLU)
        print("FC3 Input Dimension= " + str(fc2_output_dim))
        self.fc3_encode = nn.Linear(in_features=fc2_output_dim,
                                    out_features=embedding_dim)
        self.relu1 = nn.ReLU(inplace=True)
        print("FC3 Output Dimension= " + str(embedding_dim))
# Example #4
    def __init__(self,
                 input_cube_edge=128,
                 full_conv_limit=4,
                 full_fc_limit=3,
                 ch_mult=2,
                 conv_bias=False,
                 fc_bias=False,
                 leakyrelu_const=0.01):
        """Encoder built from a fixed Conv3d(kernel=4, stride=2, padding=1)
        stack plus a chain of fully connected layers that halve the
        feature count at each step.

        input:  batch_size * channels * cube_edge * cube_edge * cube_edge
        output: batch_size * (channel_multiplier * channels)
        BatchNorm is also added.
        Make sure that the names of the layers are specified according to
        the weight init function.
        """
        super(Encoder, self).__init__()

        # --- Convolutional layers: Conv3d + BatchNorm3d + LeakyReLU ---
        conv_net = nn.Sequential()
        channels = 1
        for layer in range(1, full_conv_limit + 1):
            conv_net.add_module(
                "Conv_{}".format(layer),
                nn.Conv3d(channels,
                          channels * ch_mult,
                          kernel_size=4,
                          stride=2,
                          padding=1,
                          bias=conv_bias))
            conv_net.add_module("BatchNorm_{}".format(layer),
                                nn.BatchNorm3d(channels * ch_mult))
            conv_net.add_module("leakyrelu_{}".format(layer),
                                nn.LeakyReLU(leakyrelu_const, inplace=True))
            # Every pass multiplies the channels and shrinks the cube edge.
            channels *= ch_mult
            input_cube_edge = calculate_conv_output_dim(D=input_cube_edge,
                                                        K=4,
                                                        P=1,
                                                        S=2)

        # Final convolution — no BatchNorm or activation afterwards.
        conv_net.add_module(
            "Conv_{}".format(full_conv_limit + 1),
            nn.Conv3d(channels,
                      channels * ch_mult,
                      kernel_size=4,
                      stride=2,
                      padding=1,
                      bias=conv_bias))
        input_cube_edge = calculate_conv_output_dim(D=input_cube_edge,
                                                    K=4,
                                                    P=1,
                                                    S=2)
        out_channels = channels * ch_mult
        self.conv_net = conv_net

        # --- Fully connected layers: each halves the feature count ---
        total_fc_input = input_cube_edge ** 3 * out_channels
        fc_net = nn.Sequential()
        for layer in range(1, full_fc_limit + 1):
            fc_net.add_module(
                "Linear_{}".format(layer),
                nn.Linear(in_features=total_fc_input,
                          out_features=total_fc_input // 2,
                          bias=fc_bias))
            total_fc_input //= 2
        self.fc_net = fc_net

        # Expose the FC output width, embedded cube edge and channel count
        # for use in the matching Decoders.
        self.fc_output_dim = total_fc_input
        self.embed_cube_edge = input_cube_edge
        self.out_channels = out_channels
# Example #5
    def __init__(self, cube_dimension, fc1_hidden_dim, fc2_output_dim,
                 embedding_dim, leakyrelu_const, pool_return_indices):
        super(Encoder, self).__init__()

        self.pool_return_indices = pool_return_indices

        # First Convolutional Layer
        self.conv1_in_channels = 1
        self.conv1_out_channels = 4
        self.conv1_kernel = 3
        self.conv1_stride = 1
        self.conv1_padding = 0
        conv1_output_dim = calculate_conv_output_dim(D=cube_dimension,
                                                     K=self.conv1_kernel,
                                                     P=self.conv1_padding,
                                                     S=self.conv1_stride)
        print("Conv1 Output Dimension = " + str(conv1_output_dim))
        self.conv1_encode = nn.Conv3d(in_channels=self.conv1_in_channels,
                                      out_channels=self.conv1_out_channels,
                                      kernel_size=self.conv1_kernel,
                                      stride=self.conv1_stride,
                                      padding=self.conv1_padding)
        nn.init.xavier_uniform_(self.conv1_encode.weight)
        self.bn1_encode = nn.BatchNorm3d(num_features=self.conv1_out_channels)
        self.leakyrelu1 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # First Pooling
        self.pool1_kernel = 2
        self.pool1_stride = 2
        pool1_output_dim = calculate_pool_output_dim(D=conv1_output_dim,
                                                     K=self.pool1_kernel,
                                                     S=self.pool1_stride)
        print("Pool1 Output Dimension = " + str(pool1_output_dim))
        #         self.pool1_encode = nn.MaxPool3d(kernel_size=self.pool1_kernel,
        #                                             stride=self.pool1_stride,
        #                                             return_indices = self.pool_return_indices)
        self.pool1_encode = nn.AvgPool3d(kernel_size=self.pool1_kernel,
                                         stride=self.pool1_stride,
                                         padding=0,
                                         ceil_mode=False,
                                         count_include_pad=True)

        # Second Convolutional Layer
        self.conv2_in_channels = self.conv1_out_channels
        self.conv2_out_channels = 24
        self.conv2_kernel = 4
        self.conv2_stride = 1
        self.conv2_padding = 0
        conv2_output_dim = calculate_conv_output_dim(D=pool1_output_dim,
                                                     K=self.conv2_kernel,
                                                     P=self.conv2_padding,
                                                     S=self.conv2_stride)
        print("Conv2 Output Dimension= " + str(conv2_output_dim))
        self.conv2_encode = nn.Conv3d(in_channels=self.conv2_in_channels,
                                      out_channels=self.conv2_out_channels,
                                      kernel_size=self.conv2_kernel,
                                      stride=self.conv2_stride,
                                      padding=self.conv2_padding)
        nn.init.xavier_uniform_(self.conv2_encode.weight)
        self.bn2_encode = nn.BatchNorm3d(num_features=self.conv2_out_channels)
        self.leakyrelu2 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # Second Pooling
        self.pool2_kernel = 2
        self.pool2_stride = 2
        pool2_output_dim = calculate_pool_output_dim(D=conv2_output_dim,
                                                     K=self.pool2_kernel,
                                                     S=self.pool2_stride)
        print("Pool2 Output Dimension = " + str(pool2_output_dim))
        #         self.pool2_encode = nn.MaxPool3d(kernel_size=self.pool2_kernel,
        #                                             stride=self.pool2_stride,
        #                                             return_indices = self.pool_return_indices)
        self.pool2_encode = nn.AvgPool3d(kernel_size=self.pool2_kernel,
                                         stride=self.pool2_stride,
                                         padding=0,
                                         ceil_mode=False,
                                         count_include_pad=True)

        # Third Convolutional Layer
        self.conv3_in_channels = self.conv2_out_channels
        self.conv3_out_channels = 48
        self.conv3_kernel = 3
        self.conv3_stride = 1
        self.conv3_padding = 0
        conv3_output_dim = calculate_conv_output_dim(D=pool2_output_dim,
                                                     K=self.conv3_kernel,
                                                     P=self.conv3_padding,
                                                     S=self.conv3_stride)
        print("Conv3 Output Dimension= " + str(conv3_output_dim))
        self.conv3_encode = nn.Conv3d(in_channels=self.conv3_in_channels,
                                      out_channels=self.conv3_out_channels,
                                      kernel_size=self.conv3_kernel,
                                      stride=self.conv3_stride,
                                      padding=self.conv3_padding)
        nn.init.xavier_uniform_(self.conv3_encode.weight)
        self.bn3_encode = nn.BatchNorm3d(num_features=self.conv3_out_channels)
        self.leakyrelu3 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # Third Pooling
        self.pool3_kernel = 2
        self.pool3_stride = 2
        pool3_output_dim = calculate_pool_output_dim(D=conv3_output_dim,
                                                     K=self.pool3_kernel,
                                                     S=self.pool3_stride)
        print("Pool3 Output Dimension = " + str(pool3_output_dim))
        #         self.pool3_encode = nn.MaxPool3d(kernel_size=self.pool3_kernel,
        #                                             stride=self.pool3_stride,
        #                                             return_indices = self.pool_return_indices)
        self.pool3_encode = nn.AvgPool3d(kernel_size=self.pool3_kernel,
                                         stride=self.pool3_stride,
                                         padding=0,
                                         ceil_mode=False,
                                         count_include_pad=True)

        # Fourth Convolutional Layer
        self.conv4_in_channels = self.conv3_out_channels
        self.conv4_out_channels = 64
        self.conv4_kernel = 4
        self.conv4_stride = 2
        self.conv4_padding = 0
        conv4_output_dim = calculate_conv_output_dim(D=pool3_output_dim,
                                                     K=self.conv4_kernel,
                                                     P=self.conv4_padding,
                                                     S=self.conv4_stride)
        print("Conv4 Output Dimension= " + str(conv4_output_dim))
        self.conv4_encode = nn.Conv3d(in_channels=self.conv4_in_channels,
                                      out_channels=self.conv4_out_channels,
                                      kernel_size=self.conv4_kernel,
                                      stride=self.conv4_stride,
                                      padding=self.conv4_padding)
        nn.init.xavier_uniform_(self.conv4_encode.weight)
        self.bn4_encode = nn.BatchNorm3d(num_features=self.conv4_out_channels)
        self.leakyrelu4 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # Fifth Convolutional Layer
        self.conv5_in_channels = self.conv4_out_channels
        self.conv5_out_channels = 128
        self.conv5_kernel = 3
        self.conv5_stride = 1
        self.conv5_padding = 0
        conv5_output_dim = calculate_conv_output_dim(D=conv4_output_dim,
                                                     K=self.conv5_kernel,
                                                     P=self.conv5_padding,
                                                     S=self.conv5_stride)
        print("Conv5 Output Dimension= " + str(conv5_output_dim))
        self.conv5_encode = nn.Conv3d(in_channels=self.conv5_in_channels,
                                      out_channels=self.conv5_out_channels,
                                      kernel_size=self.conv5_kernel,
                                      stride=self.conv5_stride,
                                      padding=self.conv5_padding)
        nn.init.xavier_uniform_(self.conv5_encode.weight)
        self.bn5_encode = nn.BatchNorm3d(num_features=self.conv5_out_channels)
        self.leakyrelu5 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # Sixth Convolutional Layer
        self.conv6_in_channels = self.conv5_out_channels
        self.conv6_out_channels = 256
        self.conv6_kernel = 2
        self.conv6_stride = 1
        self.conv6_padding = 0
        conv6_output_dim = calculate_conv_output_dim(D=conv5_output_dim,
                                                     K=self.conv6_kernel,
                                                     P=self.conv6_padding,
                                                     S=self.conv6_stride)
        print("Conv6 Output Dimension= " + str(conv6_output_dim))
        self.conv6_encode = nn.Conv3d(in_channels=self.conv6_in_channels,
                                      out_channels=self.conv6_out_channels,
                                      kernel_size=self.conv6_kernel,
                                      stride=self.conv6_stride,
                                      padding=self.conv6_padding)
        nn.init.xavier_uniform_(self.conv6_encode.weight)
        self.bn6_encode = nn.BatchNorm3d(num_features=self.conv6_out_channels)
        self.leakyrelu6 = nn.LeakyReLU(leakyrelu_const, inplace=True)

        # 7th Convolutional Layer
        self.conv7_in_channels = self.conv6_out_channels
        self.conv7_out_channels = 256
        self.conv7_kernel = 2
        self.conv7_stride = 1
        self.conv7_padding = 0
        conv7_output_dim = calculate_conv_output_dim(D=conv6_output_dim,
                                                     K=self.conv7_kernel,
                                                     P=self.conv7_padding,
                                                     S=self.conv7_stride)
        print("Conv7 Output Dimension= " + str(conv7_output_dim))
        self.conv7_encode = nn.Conv3d(in_channels=self.conv7_in_channels,
                                      out_channels=self.conv7_out_channels,
                                      kernel_size=self.conv7_kernel,
                                      stride=self.conv7_stride,
                                      padding=self.conv7_padding)
        nn.init.xavier_uniform_(self.conv7_encode.weight)