Example #1
def conv_block_3d(in_dim, out_dim, activation, stride=1):
    return nn.Sequential(
        nn.Conv3d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1),  # use the stride argument, not a hard-coded 1
        #nn.BatchNorm3d(out_dim),
        nn.InstanceNorm3d(out_dim, affine=True),
        activation,
    )
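A minimal usage sketch (mine, not part of the original snippet): the block maps an (N, C, D, H, W) volume to out_dim channels, and with the stride fix above a stride of 2 would halve each spatial dimension.

import torch
import torch.nn as nn

block = conv_block_3d(in_dim=1, out_dim=16, activation=nn.ReLU(inplace=True))
x = torch.randn(2, 1, 32, 32, 32)  # (N, C, D, H, W)
y = block(x)                       # -> torch.Size([2, 16, 32, 32, 32])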
Example #2
    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()

        conv_block = [  nn.ReplicationPad3d(1),
                        nn.Conv3d(in_features, in_features, 3),
                        # nn.BatchNorm3d(in_features),
                        nn.InstanceNorm3d(in_features),
                        nn.ReLU(inplace=True),
                        nn.ReplicationPad3d(1),
                        nn.Conv3d(in_features, in_features, 3),
                        # nn.BatchNorm3d(in_features),
                        nn.InstanceNorm3d(in_features)  ]

        self.conv_block = nn.Sequential(*conv_block)
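Only the constructor is shown; a typical forward for this residual pattern (a sketch, assuming the usual identity skip) would be:

    def forward(self, x):
        # ReplicationPad3d(1) + 3x3x3 convs preserve the spatial shape,
        # so the element-wise sum with the input is well defined
        return x + self.conv_block(x)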
Example #3
 def __init__(self, inChans, elu, nll):
     super(OutputTransition, self).__init__()
     self.conv1 = nn.Conv3d(inChans, 2, kernel_size=5, padding=2)
     #self.bn1 = ContBatchNorm3d(2)
     self.bn1 = nn.InstanceNorm3d(2)
     self.conv2 = nn.Conv3d(2, 2, kernel_size=1)
     self.relu1 = ELUCons(elu, 2)
Example #4
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = nn.InstanceNorm3d(planes, affine=True)
     self.conv2 = nn.Conv3d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = nn.InstanceNorm3d(planes, affine=True)
     self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = nn.InstanceNorm3d(planes * 4, affine=True)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #5
 def __init__(self,
              channels_in,
              channels_out,
              kernel_size,
              dropout=False,
              batchnorm=True,
              instancenorm=True,
              padding=True):
     super(ConvBlock, self).__init__()
     self.batchnorm = batchnorm
     self.dropout = dropout
     self.instancenorm = instancenorm
     if batchnorm:
         self.batchnorm_layer = nn.BatchNorm3d(channels_out)
     if padding:
         padding = 1
     else:
         padding = 0
     self.conv = nn.Conv3d(channels_in,
                           channels_out,
                           kernel_size,
                           padding=padding)
     if dropout:
         self.dropout_layer = nn.Dropout3d(p=0.2)
     if instancenorm:
         # the norm runs on the conv output, so it must be sized for channels_out
         self.instance_layer = nn.InstanceNorm3d(channels_out)
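The forward pass is not included either; a plausible ordering (an assumption based on the constructor flags, not taken from the source) applies the convolution first and then the optional layers, which is also why the instance norm above has to be sized for channels_out:

 def forward(self, x):
     x = self.conv(x)
     if self.batchnorm:
         x = self.batchnorm_layer(x)
     if self.instancenorm:
         x = self.instance_layer(x)
     if self.dropout:
         x = self.dropout_layer(x)
     return x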
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 norm="SN",
                 activation=nn.LeakyReLU(0.2, inplace=True)):
        super().__init__()
        if padding == -1:
            # tuple() raises on the 0-d array produced by a scalar
            # kernel_size, so fall back to a plain int in that case
            pad = ((np.array(kernel_size) - 1) * np.array(dilation)) // 2
            padding = tuple(pad.tolist()) if pad.ndim > 0 else int(pad)
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride,
                              padding, dilation, groups, bias)

        self.norm = norm
        if norm == "BN":
            self.norm_layer = nn.BatchNorm3d(out_channels)
        elif norm == "IN":
            self.norm_layer = nn.InstanceNorm3d(out_channels,
                                                track_running_stats=True)
        elif norm == "SN":
            self.norm = None
            self.conv = nn.utils.spectral_norm(self.conv)
        elif norm is None:
            self.norm = None
        else:
            raise NotImplementedError(f"Norm type {norm} not implemented")

        self.activation = activation
        self.sigmoid = nn.Sigmoid()
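A forward consistent with this constructor (a sketch; the unused self.sigmoid is presumably applied elsewhere in the original class):

    def forward(self, x):
        x = self.conv(x)
        if self.norm in ("BN", "IN"):
            x = self.norm_layer(x)
        if self.activation is not None:
            x = self.activation(x)
        return x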
Example #7
def conv_block(in_dim, out_dim, act_fn):
    model = nn.Sequential(
        nn.Conv3d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.InstanceNorm3d(out_dim),
        act_fn,
    )
    return model
Example #8
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv3d(3,
                               64,
                               kernel_size=(3, 3, 3),
                               stride=1,
                               padding=(1, 1, 1),
                               bias=False)
        self.in1 = nn.InstanceNorm3d(64, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3),
                                    stride=(2, 2, 2),
                                    padding=(1, 1, 1))
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=(1, 2, 2))
        self.layer3 = self._make_layer(block, 256, layers[2], stride=(1, 2, 2))
        self.layer4 = self._make_layer(block, 512, layers[3], stride=(1, 2, 2))
        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.InstanceNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example #9
def get_normalization(normtype: str, num_channels: int, dim: int = 3):
    """Chooses an implementation for a batch normalization layer."""
    if normtype is None or normtype == 'none':
        return nn.Identity()
    elif normtype.startswith('group'):
        if normtype == 'group':
            num_groups = 8
        elif len(normtype) > len('group') and normtype[len('group'):].isdigit():
            num_groups = int(normtype[len('group'):])
        else:
            raise ValueError(
                f'normtype "{normtype}" not understood. It should be "group<G>",'
                f' where <G> is the number of groups.'
            )
        return nn.GroupNorm(num_groups=num_groups, num_channels=num_channels)
    elif normtype == 'instance':
        if dim == 3:
            return nn.InstanceNorm3d(num_channels)
        elif dim == 2:
            return nn.InstanceNorm2d(num_channels)
        else:
            raise ValueError('dim has to be 2 or 3')
    elif normtype == 'batch':
        if dim == 3:
            return nn.BatchNorm3d(num_channels)
        elif dim == 2:
            return nn.BatchNorm2d(num_channels)
        else:
            raise ValueError('dim has to be 2 or 3')
    else:
        raise ValueError(
            f'Unknown normalization type "{normtype}".\n'
            'Valid choices are "batch", "instance", "group" or "group<G>", '
            'where <G> is the number of groups.'
        )
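A quick usage sketch (assumed, not from the source):

norm = get_normalization('group4', num_channels=32)     # GroupNorm with 4 groups
inorm = get_normalization('instance', num_channels=32)  # nn.InstanceNorm3d(32)
noop = get_normalization('none', num_channels=32)       # nn.Identity()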
Example #10
 def __init__(self, in_ch, out_ch):
     super(signal_conv, self).__init__()
     self.conv = nn.Sequential(
         nn.Conv3d(in_ch, out_ch, 3, padding=1),
         nn.InstanceNorm3d(out_ch),
         nn.LeakyReLU(inplace=True),
     )
Example #11
def get_layer_norm(out_planes, norm_mode='', dim=2):
    if norm_mode == '':
        return []
    elif norm_mode == 'bn':
        if dim == 1:
            return [SynchronizedBatchNorm1d(out_planes)]
        elif dim == 2:
            return [SynchronizedBatchNorm2d(out_planes)]
        elif dim == 3:
            return [SynchronizedBatchNorm3d(out_planes)]
    elif norm_mode == 'abn':
        if dim == 1:
            return [nn.BatchNorm1d(out_planes)]
        elif dim == 2:
            return [nn.BatchNorm2d(out_planes)]
        elif dim == 3:
            return [nn.BatchNorm3d(out_planes)]
    elif norm_mode == 'in':
        if dim == 1:
            return [nn.InstanceNorm1d(out_planes)]
        elif dim == 2:
            return [nn.InstanceNorm2d(out_planes)]
        elif dim == 3:
            return [nn.InstanceNorm3d(out_planes)]
    elif norm_mode == 'bin':
        if dim == 1:
            return [BatchInstanceNorm1d(out_planes)]
        elif dim == 2:
            return [BatchInstanceNorm2d(out_planes)]
        elif dim == 3:
            return [BatchInstanceNorm3d(out_planes)]
    raise ValueError('Unknown normalization option {}'.format(norm_mode))
Example #12
 def __init__(self, in_channels, mid_channels, out_channels):
     super().__init__()
     self.conv1 = nn.Conv3d(in_channels=in_channels,
                            out_channels=mid_channels,
                            kernel_size=3,
                            padding=1,
                            bias=False)
     self.bn1 = nn.InstanceNorm3d(mid_channels)  # conv1 outputs mid_channels, not out_channels
     self.act1 = nn.LeakyReLU()
     self.conv2 = nn.Conv3d(in_channels=mid_channels,
                            out_channels=out_channels,
                            kernel_size=3,
                            padding=1,
                            bias=False)
     self.bn2 = nn.InstanceNorm3d(out_channels)
     self.act2 = nn.LeakyReLU()
Example #13
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1,
                 groups=1, bias=True, norm="SN", activation=nn.LeakyReLU(0.2, inplace=True),
                 transpose=False, output_padding=0):
        super().__init__()
        if padding == -1:
            padding = ((np.array(kernel_size) - 1) * np.array(dilation)) // 2
            # hasattr(padding, '__iter__') is True even for a 0-d ndarray,
            # so check the dimensionality before converting to a tuple
            padding = tuple(padding.tolist()) if padding.ndim > 0 else int(padding)

        if transpose:
            self.conv = nn.ConvTranspose3d(
                in_channels, out_channels, kernel_size,
                stride, padding, output_padding, groups, bias, dilation)
        else:
            self.conv = nn.Conv3d(
                in_channels, out_channels, kernel_size,
                stride, padding, dilation, groups, bias)

        self.norm = norm
        if norm == "BN":
            self.norm_layer = nn.BatchNorm3d(out_channels)
        elif norm == "IN":
            self.norm_layer = nn.InstanceNorm3d(out_channels, track_running_stats=True)
        elif norm == "SN":
            self.norm = None
            self.conv = nn.utils.spectral_norm(self.conv)
        elif norm is None:
            self.norm = None
        else:
            raise NotImplementedError(f"Norm type {norm} not implemented")

        self.activation = activation
Example #14
 def __init__(self, ch_in, ch_out):
     super(conv_block, self).__init__()
     self.conv = nn.Sequential(
         nn.Conv3d(ch_in, ch_out, kernel_size=3, stride=1, padding=1),
         nn.InstanceNorm3d(ch_out), nn.ReLU(inplace=True),
         nn.Conv3d(ch_out, ch_out, kernel_size=3, stride=2, padding=1),
         nn.ReLU(inplace=True))
Example #15
 def __init__(self, input_channels, output_channels, leakiness=1e-2, 
              dropout_p=0.3, kernel_size=3, conv_bias=True, 
              inst_norm_affine=True, lrelu_inplace=True):
     """[To Downsample a given input with convolution operation]
     
     [This one will be used to downsample a given comvolution while doubling 
     the number filters]
     
     Arguments:
         input_channels {[int]} -- [The input number of channels are taken
                                    and then are downsampled to double usually]
         output_channels {[int]} -- [the output number of channels are 
                                     usually the double of what of input]
     
     Keyword Arguments:
         leakiness {float} -- [the negative leakiness] (default: {1e-2})
         conv_bias {bool} -- [to use the bias in filters] (default: {True})
         inst_norm_affine {bool} -- [affine use in norm] (default: {True})
         lrelu_inplace {bool} -- [To update conv outputs with lrelu outputs] 
                                 (default: {True})
     """
     super(DownsamplingModule, self).__init__()
     self.dropout_p = dropout_p
     self.conv_bias = conv_bias
     self.leakiness = leakiness
     self.inst_norm_affine = inst_norm_affine
     self.lrelu_inplace = lrelu_inplace  # was hard-coded to True
     self.in_0 = nn.InstanceNorm3d(output_channels,
                                   affine=self.inst_norm_affine,
                                   track_running_stats=True)
     # use the kernel_size argument (it was hard-coded to 3, which breaks
     # the padding formula for any other kernel size)
     self.conv0 = nn.Conv3d(input_channels, output_channels,
                            kernel_size=kernel_size, stride=2,
                            padding=(kernel_size - 1) // 2,
                            bias=self.conv_bias)
Example #16
        def encoder_block(in_filters,
                          out_filters,
                          normalization='batchnorm',
                          activation='prelu'):  # BEST: batchnorm + lrelu

            normalizations = nn.ModuleDict(
                [['batchnorm', nn.BatchNorm3d(out_filters)],
                 ['instancenorm',
                  nn.InstanceNorm3d(out_filters)],
                 [
                     'groupnorm',
                     nn.GroupNorm(num_groups=self.nf, num_channels=out_filters)
                 ]])

            activations = nn.ModuleDict(
                [['relu', nn.ReLU()], ['prelu', nn.PReLU()],
                 ['lrelu', nn.LeakyReLU(self.leaky_rate)]])

            block = [
                nn.Conv3d(in_filters,
                          out_filters,
                          kernel_size=self.kernel_size,
                          stride=2,
                          padding=1), normalizations[normalization],
                activations[activation]
            ]

            return block
Example #17
    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ResidualConvBlock, self).__init__()

        ops = []
        for i in range(n_stages):
            if i == 0:
                input_channel = n_filters_in
            else:
                input_channel = n_filters_out

            ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
            if normalization == 'batchnorm':
                ops.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                ops.append(nn.InstanceNorm3d(n_filters_out))
            elif normalization != 'none':
                raise ValueError(f'Unsupported normalization: {normalization}')

            if i != n_stages-1:
                ops.append(nn.ReLU(inplace=True))

        self.conv = nn.Sequential(*ops)
        self.relu = nn.ReLU(inplace=True)
Example #18
    def __init__(self, z_size, n_convs, in_size, out_size, padding,
                 instance_norm, separable, leaky):
        super().__init__()
        block = []

        conv = SeparableConv3d if separable else nn.Conv3d
        activation = nn.LeakyReLU if leaky else nn.ReLU

        if padding:
            pad = [(z_size - 1) // 2, 1, 1]
        else:
            pad = [(z_size - 1) // 2, 0, 0]

        block.append(
            conv(in_size, out_size, kernel_size=[z_size, 3, 3], padding=pad))
        block.append(activation())

        for n in range(n_convs - 1):
            block.append(
                conv(out_size,
                     out_size,
                     kernel_size=[z_size, 3, 3],
                     padding=pad))
            block.append(activation())
        if instance_norm:
            block.append(nn.InstanceNorm3d(out_size))

        self.block = nn.Sequential(*block)
Example #19
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    shortcut_type,
                    cardinality,
                    stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if shortcut_type == 'A':
                downsample = partial(downsample_basic_block,
                                     planes=planes * block.expansion,
                                     stride=stride)
            else:
                downsample = nn.Sequential(
                    nn.Conv3d(self.inplanes,
                              planes * block.expansion,
                              kernel_size=1,
                              stride=stride,
                              bias=False),
                    nn.InstanceNorm3d(planes * block.expansion))

        layers = []
        layers.append(
            block(self.inplanes, planes, cardinality, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, cardinality))

        return nn.Sequential(*layers)
Example #20
 def _make_conv_layer(self, in_channel, out_channel, kernel_size=3, stride=1, f_stride=None, pad=0):
     if f_stride:
         stride = (f_stride, stride, stride)
     return nn.Sequential(nn.Conv3d(in_channel, out_channel, kernel_size=kernel_size,
                                    stride=stride, bias=False, padding=pad),
                          nn.InstanceNorm3d(out_channel),
                          nn.LeakyReLU(0.2, inplace=True))
Example #21
 def conv_norm_lrelu(self, feat_in, feat_out):
     return nn.Sequential(
         nn.Conv3d(feat_in,
                   feat_out,
                   kernel_size=(3, 3, 3),
                   stride=1,
                   padding=(1, 1, 1),
                   bias=False), nn.InstanceNorm3d(feat_out),
         nn.LeakyReLU(inplace=True),
         nn.Conv3d(feat_out,
                   feat_out,
                   kernel_size=(3, 3, 3),
                   stride=1,
                   padding=(1, 1, 1),
                   bias=False), nn.InstanceNorm3d(feat_out),
         nn.LeakyReLU(inplace=True))
Example #22
 def __init__(self, in_size, out_size):
     super(UNetDown, self).__init__()
     self.model = nn.Sequential(
         nn.Conv3d(in_size, out_size, kernel_size=3, stride=2, padding=1),
         nn.InstanceNorm3d(out_size),
         nn.LeakyReLU(0.2)
       )
Example #23
    def __init__(self, block, layers, nll=True, num_classes=2):
        self.inplanes = 32
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv3d(1,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.InstanceNorm3d(32, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 32, layers[0])
        self.probabilityMapLayer1 = nn.Conv3d(32, 2, kernel_size=1)
        self.layer2 = self._make_layer(block, 64, layers[1], stride=2)
        self.probabilityMapLayer2 = nn.ConvTranspose3d(64,
                                                       2,
                                                       kernel_size=4,
                                                       stride=2,
                                                       padding=1)
        self.layer3 = self._make_layer(block, 128, layers[2], stride=2)
        self.probabilityMapLayer3 = nn.ConvTranspose3d(128,
                                                       2,
                                                       kernel_size=8,
                                                       stride=4,
                                                       padding=2)

        if nll:
            self.softmax = nn.LogSoftmax(dim=1)  # explicit channel dim avoids the deprecation warning
        else:
            self.softmax = nn.Softmax(dim=1)
Example #24
    def __init__(self,
                 n_filters_in,
                 n_filters_out,
                 stride=2,
                 normalization='none'):
        super(Upsampling, self).__init__()

        ops = []
        ops.append(
            nn.Upsample(scale_factor=stride,
                        mode='trilinear',
                        align_corners=False))
        ops.append(
            nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1))
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            raise ValueError(f'Unsupported normalization: {normalization}')
        ops.append(nn.ReLU(inplace=True))

        self.conv = nn.Sequential(*ops)
Example #25
 def discriminator_block(in_filters, out_filters, normalization=True):
     """Returns downsampling layers of each discriminator block"""
     layers = [nn.Conv3d(in_filters, out_filters, 4, stride=2, padding=1)]
     if normalization:
         layers.append(nn.InstanceNorm3d(out_filters))
     layers.append(nn.LeakyReLU(0.2, inplace=True))
     return layers
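Because the helper returns a plain list, stages can be unpacked into one nn.Sequential. A sketch (channel sizes are my assumptions; leaving the norm off the first stage follows the common PatchGAN convention):

disc = nn.Sequential(
    *discriminator_block(1, 64, normalization=False),
    *discriminator_block(64, 128),
    *discriminator_block(128, 256),
    nn.Conv3d(256, 1, kernel_size=4, padding=1),  # patch-level real/fake logits
)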
Example #26
    def __init__(self,
                 n_filters_in,
                 n_filters_out,
                 stride=2,
                 normalization='none'):
        super(DownsamplingConvBlock, self).__init__()

        ops = []
        ops.append(
            nn.Conv3d(n_filters_in,
                      n_filters_out,
                      stride,  # kernel size equal to the stride
                      padding=0,
                      stride=stride))
        if normalization == 'batchnorm':
            ops.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            ops.append(
                nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            ops.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            raise ValueError(f'Unsupported normalization: {normalization}')

        ops.append(nn.ReLU(inplace=True))

        self.conv = nn.Sequential(*ops)
Example #27
 def __init__(self,
              n_layers: int,
              n_input: int = 1,
              n_output: int = 1,
              kernel_size: Tuple[int] = (3, 3, 3),
              dropout_prob: float = 0,
              dim: int = 3,
              **kwargs):
     super(SimpleConvNet, self).__init__()
     self.n_layers = n_layers
     self.n_input = n_input
     self.n_output = n_output
     self.kernel_sz = kernel_size
     self.dropout_prob = dropout_prob
     self.dim = dim
     self.criterion = nn.MSELoss()
     if isinstance(kernel_size[0], int):
         self.kernel_sz = [kernel_size for _ in range(n_layers)]
     else:
         self.kernel_sz = kernel_size
     pad = nn.ReplicationPad3d if dim == 3 else \
         nn.ReplicationPad2d if dim == 2 else \
             nn.ReplicationPad1d
     self.layers = nn.ModuleList([nn.Sequential(
         pad([ks // 2 for p in zip(ksz, ksz) for ks in p]),
         nn.Conv3d(n_input, n_output, ksz) if dim == 3 else \
             nn.Conv2d(n_input, n_output, ksz) if dim == 2 else \
                 nn.Conv1d(n_input, n_output, ksz),
         nn.ReLU(),
         nn.InstanceNorm3d(n_output, affine=True) if dim == 3 else \
             nn.InstanceNorm2d(n_output, affine=True) if dim == 2 else \
                 nn.InstanceNorm1d(n_output, affine=True),
         nn.Dropout3d(dropout_prob) if dim == 3 else \
             nn.Dropout2d(dropout_prob) if dim == 2 else \
                 nn.Dropout(dropout_prob)) for ksz in self.kernel_sz])
Example #28
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    kernel3=0,
                    prefix=''):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv3d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                #nn.GroupNorm(32, planes * block.expansion),
                nn.InstanceNorm3d(planes * block.expansion),
            )

        layers = []
        kernel = 1 if kernel3 == 0 else 3
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  kernel_size=kernel))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            kernel = 1 if kernel3 <= i else 3
            layers.append(block(self.inplanes, planes, kernel_size=kernel))

        return nn.Sequential(*layers)
Example #29
 def test_instancenorm(self):
     self._check_one_layer(nn.InstanceNorm1d(16, affine=True),
                           torch.randn(16, 16, 10))
     self._check_one_layer(nn.InstanceNorm2d(16, affine=True),
                           torch.randn(16, 16, 10, 9))
     self._check_one_layer(nn.InstanceNorm3d(16, affine=True),
                           torch.randn(16, 16, 10, 9, 8))
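For reference, nn.InstanceNorm3d expects a 5-D (N, C, D, H, W) input and normalizes each (sample, channel) slice over its D*H*W elements. A minimal check of that semantics (my sketch, independent of the test above):

import torch
import torch.nn as nn

x = torch.randn(2, 4, 5, 6, 7)              # (N, C, D, H, W)
norm = nn.InstanceNorm3d(4)                 # affine=False, no running stats by default
mean = x.mean(dim=(2, 3, 4), keepdim=True)
var = x.var(dim=(2, 3, 4), unbiased=False, keepdim=True)
manual = (x - mean) / torch.sqrt(var + norm.eps)
assert torch.allclose(norm(x), manual, atol=1e-5)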
Example #30
 def __init__(self,
              n_in,
              n_out,
              kernel_size,
              stride,
              padding=0,
              norm='None',
              sn=False):
     super(LeakyReLUConv3d, self).__init__()
     model = []
     model += [nn.ReplicationPad3d(padding)]
     if sn:
         model += [
             spectral_norm(
                 nn.Conv3d(n_in,
                           n_out,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=0,
                           bias=True))
         ]
     else:
         model += [
             nn.Conv3d(n_in,
                       n_out,
                       kernel_size=kernel_size,
                       stride=stride,
                       padding=0,
                       bias=True)
         ]
     if norm == 'Instance':  # was: 'norm' == 'Instance', which is always False
         model += [nn.InstanceNorm3d(n_out, affine=False)]
     model += [nn.LeakyReLU(inplace=True)]
     self.model = nn.Sequential(*model)
     self.model.apply(gaussian_weights_init)
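With the comparison fixed, norm='Instance' actually inserts the norm layer. A usage sketch (the arguments are my assumptions, and gaussian_weights_init must be available from the surrounding repo):

layer = LeakyReLUConv3d(n_in=16, n_out=32, kernel_size=3, stride=1,
                        padding=1, norm='Instance')
y = layer(torch.randn(1, 16, 8, 8, 8))  # -> torch.Size([1, 32, 8, 8, 8])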