import torch
import torch.nn as nn
import torch.nn.functional as F

# ActFunc, CutoutDropout, and ConvBlock are project-local helpers defined
# elsewhere in this repository.


class VoxResNet(nn.Module):
    def __init__(self, in_channels, n_classes):
        super(VoxResNet, self).__init__()

        self.conv1 = nn.Conv3d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm3d(32)
        self.act1 = ActFunc('ReLU')

        self.conv2 = nn.Conv3d(in_channels=32, out_channels=32, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm3d(32)
        self.act2 = ActFunc('ReLU')

        self.conv3 = nn.Conv3d(in_channels=32, out_channels=64, kernel_size=3, padding=1, stride=2)
        self.mod1 = VoxResModule(in_channels=64)
        self.mod2 = VoxResModule(in_channels=64)
        self.bn3 = nn.BatchNorm3d(64)
        self.act3 = ActFunc('ReLU')

        self.conv4 = nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3, padding=1, stride=2)
        self.mod3 = VoxResModule(in_channels=64)
        self.mod4 = VoxResModule(in_channels=64)
        self.bn4 = nn.BatchNorm3d(64)
        self.act4 = ActFunc('ReLU')

        self.conv5 = nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3, padding=1, stride=2)
        self.mod5 = VoxResModule(in_channels=64)
        self.mod6 = VoxResModule(in_channels=64)

        # Transposed-conv classifiers that upsample each stage's features
        # back to full resolution
        self.deconv1 = nn.ConvTranspose3d(in_channels=32, out_channels=n_classes, kernel_size=3, stride=1, padding=1)
        self.deconv2 = nn.ConvTranspose3d(in_channels=64, out_channels=n_classes, kernel_size=2, stride=2)
        self.deconv3 = nn.ConvTranspose3d(in_channels=64, out_channels=n_classes, kernel_size=4, stride=4)
        self.deconv4 = nn.ConvTranspose3d(in_channels=64, out_channels=n_classes, kernel_size=8, stride=8)

        self.softmax = F.softmax  # functional softmax; dim is passed at call time
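
    # A possible forward pass, sketched from this constructor and the VoxResNet
    # paper (auxiliary classifiers at four scales, deconvolved to full
    # resolution and fused by summation). The original forward is not shown in
    # this excerpt, so treat this as an illustrative assumption.
    def forward(self, x):
        h1 = self.act2(self.bn2(self.conv2(self.act1(self.bn1(self.conv1(x))))))
        h2 = self.mod2(self.mod1(self.conv3(h1)))                       # 1/2 res
        h3 = self.mod4(self.mod3(self.conv4(self.act3(self.bn3(h2)))))  # 1/4 res
        h4 = self.mod6(self.mod5(self.conv5(self.act4(self.bn4(h3)))))  # 1/8 res
        # Sum the four class-score maps at full resolution, then normalize.
        c = (self.deconv1(h1) + self.deconv2(h2)
             + self.deconv3(h3) + self.deconv4(h4))
        return self.softmax(c, dim=1)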

class VoxResModule(nn.Module):
    def __init__(self, in_channels):
        super(VoxResModule, self).__init__()

        self.bn1 = nn.BatchNorm3d(in_channels)
        self.act1 = ActFunc('ReLU')
        self.conv1 = nn.Conv3d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, padding=1)

        self.bn2 = nn.BatchNorm3d(in_channels)
        self.act2 = ActFunc('ReLU')
        self.conv2 = nn.Conv3d(in_channels=in_channels, out_channels=in_channels, kernel_size=3, padding=1)
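
    # A pre-activation residual forward (BN -> ReLU -> Conv, twice, plus the
    # identity skip); a sketch under that assumption, since the original
    # forward is not part of this excerpt.
    def forward(self, x):
        h = self.conv1(self.act1(self.bn1(x)))
        h = self.conv2(self.act2(self.bn2(h)))
        return x + h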

class DownTransition(nn.Module):
    def __init__(self, in_channels, n_convs):
        super(DownTransition, self).__init__()

        out_channels = 2 * in_channels
        self.down_conv = nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=2, stride=2, bias=False)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.act1 = ActFunc('PReLU', num_parameters=out_channels)

        self.conv_block = _make_conv_layer(out_channels, n_convs)
        self.act2 = ActFunc('PReLU', num_parameters=out_channels)
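
    # A V-Net-style forward sketch (assumption): the strided conv halves the
    # resolution and doubles the channels, and the conv block is applied as a
    # residual on top of the down-sampled features.
    def forward(self, x):
        down = self.act1(self.bn1(self.down_conv(x)))
        return self.act2(self.conv_block(down) + down)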

class UpTransition(nn.Module):
    def __init__(self, in_channels, out_channels, n_convs):
        super(UpTransition, self).__init__()

        self.de_conv = nn.ConvTranspose3d(in_channels=in_channels,
                                          out_channels=out_channels // 2,
                                          kernel_size=2,
                                          stride=2,
                                          bias=False)
        self.bn1 = nn.BatchNorm3d(out_channels // 2)
        self.act1 = ActFunc('PReLU', num_parameters=out_channels // 2)
        self.dropout1 = CutoutDropout(p=0.2)

        self.conv_block = _make_conv_layer(out_channels, n_convs)
        self.act2 = ActFunc('PReLU', num_parameters=out_channels)
        self.dropout2 = CutoutDropout(p=0.2)
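
    # A V-Net-style forward sketch (assumption): upsample, concatenate the
    # encoder skip to restore out_channels, then apply the conv block as a
    # residual. The placement of the two CutoutDropout layers is a guess.
    def forward(self, x, skip):
        up = self.act1(self.bn1(self.de_conv(self.dropout1(x))))
        merged = torch.cat([up, self.dropout2(skip)], dim=1)
        return self.act2(self.conv_block(merged) + merged)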

class AttentionGate(nn.Module):
    def __init__(self, in_channels, gating_channels, inter_channels):
        super(AttentionGate, self).__init__()

        self.wx = nn.Conv3d(in_channels=in_channels,
                            out_channels=inter_channels,
                            kernel_size=1,
                            stride=1,
                            bias=False)
        self.wg = nn.Conv3d(in_channels=gating_channels,
                            out_channels=inter_channels,
                            kernel_size=1,
                            stride=1,
                            bias=False)

        self.psi = nn.Conv3d(in_channels=inter_channels,
                             out_channels=1,
                             kernel_size=1,
                             stride=1,
                             bias=False)
        self.act1 = ActFunc('ReLU')
        self.act2 = nn.Sigmoid()

        self.wout = nn.Sequential(
            nn.Conv3d(in_channels=in_channels,
                      out_channels=in_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False),
            nn.BatchNorm3d(in_channels))
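
    # A standard additive attention-gate forward (assumption, after Oktay et
    # al.'s Attention U-Net): project both inputs to inter_channels, sum them,
    # apply ReLU, and let a sigmoid-activated 1x1x1 conv produce a voxel-wise
    # attention map that rescales the skip features. This sketch assumes x and
    # the gating signal g already share the same spatial size.
    def forward(self, x, g):
        alpha = self.act2(self.psi(self.act1(self.wx(x) + self.wg(g))))
        return self.wout(x * alpha)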

class OutputTransition(nn.Module):
    def __init__(self, in_channels):
        super(OutputTransition, self).__init__()

        self.conv1 = nn.Conv3d(in_channels=in_channels, out_channels=2, kernel_size=5, padding=2, bias=False)
        self.bn1 = nn.BatchNorm3d(2)
        self.conv2 = nn.Conv3d(in_channels=2, out_channels=2, kernel_size=1, bias=False)
        self.act1 = ActFunc('PReLU', num_parameters=2)
        self.softmax = F.softmax
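
    # A plausible forward (assumption): 5x5x5 conv to two maps, batch norm and
    # PReLU, a 1x1x1 conv, then a channel-wise softmax over the two classes.
    def forward(self, x):
        out = self.conv2(self.act1(self.bn1(self.conv1(x))))
        return self.softmax(out, dim=1)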

class InputTransition(nn.Module):
    def __init__(self):
        super(InputTransition, self).__init__()

        self.conv1 = nn.Conv3d(in_channels=1,
                               out_channels=16,
                               kernel_size=5,
                               padding=2,
                               bias=False)
        self.bn1 = nn.BatchNorm3d(16)
        self.act1 = ActFunc('PReLU', num_parameters=16)
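
    # A minimal forward sketch (assumption): one 5x5x5 conv lifts the
    # single-channel volume to 16 feature maps, followed by BN and PReLU.
    def forward(self, x):
        return self.act1(self.bn1(self.conv1(x)))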

class GatingSignal(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1):
        super(GatingSignal, self).__init__()

        self.conv = nn.Conv3d(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=1,
                              padding=0,
                              bias=False)
        self.bn = nn.BatchNorm3d(out_channels)
        self.act = ActFunc("ReLU")
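
    # A straightforward forward sketch (assumption): a 1x1x1 projection of the
    # coarsest decoder features to the width expected by the attention gates.
    def forward(self, x):
        return self.act(self.bn(self.conv(x)))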


def _make_conv_layer(in_channels, n_convs):
    layers = []
    for i in range(n_convs):

        # Adding non-linearity
        if i != 0:
            layers.append(ActFunc('PReLU', num_parameters=in_channels))

        layers.append(ConvBlock(in_channels=in_channels, out_channels=in_channels))

    return nn.Sequential(*layers)
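
# Usage sketch (illustrative): _make_conv_layer(64, 3) stacks three 64-channel
# ConvBlocks with a PReLU between consecutive blocks but none after the last,
# since DownTransition/UpTransition apply their own act2 after the residual
# addition, e.g.:
#
#     body = _make_conv_layer(64, n_convs=3)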

class MPRBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(MPRBlock, self).__init__()
        assert out_channels % 4 == 0

        inter_channels = out_channels // 4

        self.axial = nn.Conv3d(in_channels=in_channels,
                               out_channels=inter_channels,
                               kernel_size=(1, 3, 3),
                               stride=(1, 1, 1),
                               padding=(0, 1, 1),
                               bias=False)
        self.bn_axial = nn.BatchNorm3d(inter_channels)
        self.act_axial = ActFunc('ReLU')

        self.coronal = nn.Conv3d(in_channels=in_channels,
                                 out_channels=inter_channels,
                                 kernel_size=(3, 1, 3),
                                 stride=(1, 1, 1),
                                 padding=(1, 0, 1),
                                 bias=False)
        self.bn_coronal = nn.BatchNorm3d(inter_channels)
        self.act_coronal = ActFunc('ReLU')

        self.sagittal = nn.Conv3d(in_channels=in_channels,
                                  out_channels=inter_channels,
                                  kernel_size=(3, 3, 1),
                                  stride=(1, 1, 1),
                                  padding=(1, 1, 0),
                                  bias=False)
        self.bn_sagittal = nn.BatchNorm3d(inter_channels)
        self.act_sagittal = ActFunc('ReLU')

        self.all = nn.Conv3d(in_channels=in_channels,
                             out_channels=inter_channels,
                             kernel_size=3,
                             stride=1,
                             padding=1,
                             bias=False)
        self.bn_all = nn.BatchNorm3d(inter_channels)
        self.act_all = ActFunc('ReLU')
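
    # A plausible forward (assumption): the three plane-wise branches and the
    # full 3D branch run in parallel on the same input, and their
    # out_channels // 4 feature maps are concatenated back to out_channels.
    def forward(self, x):
        a = self.act_axial(self.bn_axial(self.axial(x)))
        c = self.act_coronal(self.bn_coronal(self.coronal(x)))
        s = self.act_sagittal(self.bn_sagittal(self.sagittal(x)))
        f = self.act_all(self.bn_all(self.all(x)))
        return torch.cat([a, c, s, f], dim=1)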

    # Method of a dense-block network class whose definition is not shown in
    # this excerpt; `block` is a layer class supplied by the caller.
    def _make_layer(self, block, in_channels, growth_rate, nb_layers, drop_rate):
        layers = []

        layers.append(nn.Conv3d(in_channels=in_channels,
                                out_channels=growth_rate,
                                kernel_size=3,
                                stride=1,
                                padding=1))
        layers.append(nn.BatchNorm3d(growth_rate))
        layers.append(ActFunc('ReLU'))

        for i in range(nb_layers - 1):
            layers.append(block((i + 1) * growth_rate, growth_rate, drop_rate))
        return nn.Sequential(*layers)
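
    # Usage sketch (illustrative; `DenseBlock` is an assumed name for the
    # `block` argument): with growth_rate=16 and nb_layers=4 the input widths
    # of the three appended blocks grow as 16, 32, 48, i.e. each block sees
    # the concatenated outputs of all layers before it.
    #
    #     features = self._make_layer(DenseBlock, in_channels=32,
    #                                 growth_rate=16, nb_layers=4,
    #                                 drop_rate=0.1)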