Example #1
    def __init__(
        self,
        input_dim,
        output_dim,
        kernel_size,
        stride,
        padding=0,
        dilation=1,
        norm="none",
        activation="relu",
        pad_type="zero",
    ):
        super().__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == "reflect":
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == "replicate":
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == "zero":
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == "batch":
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == "instance":
            # self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == "layer":
            self.norm = LayerNorm(norm_dim)
        elif norm == "adain":
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == "spectral":
            self.norm = None  # dealt with later in the code
        elif norm == "none":
            self.norm = None
        else:
            raise ValueError("Unsupported normalization: {}".format(norm))

        # initialize activation
        if activation == "relu":
            self.activation = nn.ReLU(inplace=True)
        elif activation == "lrelu":
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == "prelu":
            self.activation = nn.PReLU()
        elif activation == "selu":
            self.activation = nn.SELU(inplace=True)
        elif activation == "tanh":
            self.activation = nn.Tanh()
        elif activation == "sigmoid":
            self.activation = nn.Sigmoid()
        elif activation == "none":
            self.activation = None
        else:
            raise ValueError("Unsupported activation: {}".format(activation))

        # initialize convolution
        if norm == "spectral":
            self.conv = SpectralNorm(
                nn.Conv2d(
                    input_dim,
                    output_dim,
                    kernel_size,
                    stride,
                    dilation=dilation,
                    bias=self.use_bias,
                )
            )
        else:
            self.conv = nn.Conv2d(
                input_dim,
                output_dim,
                kernel_size,
                stride,
                dilation=dilation,
                bias=self.use_bias,
            )
Example #2
def conv3x3(in_channels, out_channels, stride=1):
    return nn.Sequential(
        nn.ReplicationPad2d(1),
        nn.Conv2d(in_channels, out_channels, 3, stride=stride))
 def test_replicationpad2d(self):
     model = nn.ReplicationPad2d((1, 2, 3, 4))
     self.run_model_test(model, train=False, batch_size=BATCH_SIZE)
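A quick sanity check of conv3x3 (input shapes assumed): padding by 1 with border replication before the 3x3 convolution keeps the spatial size for stride 1 and halves it for stride 2.

import torch
import torch.nn as nn

def conv3x3(in_channels, out_channels, stride=1):
    return nn.Sequential(
        nn.ReplicationPad2d(1),
        nn.Conv2d(in_channels, out_channels, 3, stride=stride))

x = torch.randn(2, 3, 32, 32)
print(conv3x3(3, 16)(x).shape)            # torch.Size([2, 16, 32, 32])
print(conv3x3(3, 16, stride=2)(x).shape)  # torch.Size([2, 16, 16, 16])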
Example #4
    def __init__(self,
                 input_dim,
                 output_dim,
                 kernel_size,
                 stride,
                 padding=0,
                 norm='none',
                 activation='relu',
                 pad_type='zero'):
        super(ConvBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # initialize convolution
        if norm == 'sn':
            self.conv = SpectralNorm(
                nn.Conv2d(input_dim,
                          output_dim,
                          kernel_size,
                          stride,
                          bias=self.use_bias))
        else:
            self.conv = nn.Conv2d(input_dim,
                                  output_dim,
                                  kernel_size,
                                  stride,
                                  bias=self.use_bias)
    def __init__(self,
                 input_dim,
                 output_dim,
                 kernel_size,
                 stride,
                 padding=0,
                 conv_padding=0,
                 dilation=1,
                 weight_norm='none',
                 norm='none',
                 activation='relu',
                 pad_type='zero',
                 transpose=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        elif pad_type == 'none':
            self.pad = None
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        if weight_norm == 'sn':
            self.weight_norm = spectral_norm_fn
        elif weight_norm == 'wn':
            self.weight_norm = weight_norm_fn
        elif weight_norm == 'none':
            self.weight_norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(weight_norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'elu':
            self.activation = nn.ELU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # initialize convolution
        if transpose:
            self.conv = nn.ConvTranspose2d(input_dim,
                                           output_dim,
                                           kernel_size,
                                           stride,
                                           padding=conv_padding,
                                           output_padding=conv_padding,
                                           dilation=dilation,
                                           bias=self.use_bias)
        else:
            self.conv = nn.Conv2d(input_dim,
                                  output_dim,
                                  kernel_size,
                                  stride,
                                  padding=conv_padding,
                                  dilation=dilation,
                                  bias=self.use_bias)

        if self.weight_norm:
            self.conv = self.weight_norm(self.conv)
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout,
                         use_bias):
        """Construct a convolutional block.



        Parameters:

            dim (int)           -- the number of channels in the conv layer.

            padding_type (str)  -- the name of padding layer: reflect | replicate | zero

            norm_layer          -- normalization layer

            use_dropout (bool)  -- if use dropout layers.

            use_bias (bool)     -- if the conv layer uses bias or not



        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))

        """

        conv_block = []

        p = 0

        if padding_type == 'reflect':

            conv_block += [nn.ReflectionPad2d(1)]

        elif padding_type == 'replicate':

            conv_block += [nn.ReplicationPad2d(1)]

        elif padding_type == 'zero':

            p = 1

        else:

            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            norm_layer(dim),
            nn.ReLU(True)
        ]

        if use_dropout:

            conv_block += [nn.Dropout(0.5)]

        p = 0

        if padding_type == 'reflect':

            conv_block += [nn.ReflectionPad2d(1)]

        elif padding_type == 'replicate':

            conv_block += [nn.ReplicationPad2d(1)]

        elif padding_type == 'zero':

            p = 1

        else:

            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
            norm_layer(dim)
        ]

        return nn.Sequential(*conv_block)
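Because each 3x3 convolution is preceded by one pixel of padding (explicit for reflect/replicate, implicit for zero), the block maps (N, dim, H, W) back to (N, dim, H, W). Below is a trimmed-down, free-function sketch of that invariant; make_resnet_conv_block is a hypothetical name, not part of the snippet above:

import torch
import torch.nn as nn

def make_resnet_conv_block(dim, padding_type='reflect'):
    layers, p = [], 0
    if padding_type == 'reflect':
        layers += [nn.ReflectionPad2d(1)]
    elif padding_type == 'replicate':
        layers += [nn.ReplicationPad2d(1)]
    else:  # 'zero'
        p = 1
    layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
               nn.BatchNorm2d(dim),
               nn.ReLU(True)]
    return nn.Sequential(*layers)

x = torch.randn(1, 64, 56, 56)
for mode in ('reflect', 'replicate', 'zero'):
    assert make_resnet_conv_block(64, mode)(x).shape == x.shape  # spatial size unchanged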
Example #7
 def __init__(self):
     super().__init__()
     self.layers = nn.Sequential(nn.ReplicationPad2d(1),
                                 nn.Conv2d(64, 256, 3, stride=1, padding=0),
                                 nn.PixelShuffle(upscale_factor=2),
                                 nn.PReLU())
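Here the 3x3 conv expands 64 channels to 256 = 64 * 2^2, and nn.PixelShuffle rearranges those channels into a feature map twice the resolution; a quick shape check of that sub-pixel upsampling block (input size assumed):

import torch
import torch.nn as nn

layers = nn.Sequential(nn.ReplicationPad2d(1),
                       nn.Conv2d(64, 256, 3, stride=1, padding=0),
                       nn.PixelShuffle(upscale_factor=2),
                       nn.PReLU())

x = torch.randn(1, 64, 24, 24)
print(layers(x).shape)  # torch.Size([1, 64, 48, 48]): 256 channels folded into a 2x larger map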
Example #8
    def __init__(self,
                 input_nc,
                 outer_nc,
                 inner_nc,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 norm_layer=None,
                 nl_layer=None,
                 use_dropout=False,
                 upsample='basic',
                 padding_type='zero'):
        super(UnetBlock, self).__init__()
        self.outermost = outermost
        p = 0
        downconv = []
        if padding_type == 'reflect':
            downconv += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            downconv += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)
        downconv += [
            nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=p)
        ]
        # downsample is different from upsample
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc) if norm_layer is not None else None
        uprelu = nl_layer()
        upnorm = norm_layer(outer_nc) if norm_layer is not None else None

        if outermost:
            upconv = upsampleLayer(inner_nc * 2,
                                   outer_nc,
                                   upsample=upsample,
                                   padding_type=padding_type)
            down = downconv
            up = [uprelu] + upconv + [nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = upsampleLayer(inner_nc,
                                   outer_nc,
                                   upsample=upsample,
                                   padding_type=padding_type)
            down = [downrelu] + downconv
            up = [uprelu] + upconv
            if upnorm is not None:
                up += [upnorm]
            model = down + up
        else:
            upconv = upsampleLayer(inner_nc * 2,
                                   outer_nc,
                                   upsample=upsample,
                                   padding_type=padding_type)
            down = [downrelu] + downconv
            if downnorm is not None:
                down += [downnorm]
            up = [uprelu] + upconv
            if upnorm is not None:
                up += [upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)
    def __init__(self, number_of_classes=2, input_size=(1, 22, 20)):
        super(Net, self).__init__()
        self.collection11 = nn.Sequential(nn.BatchNorm2d(1),
                                          nn.ReplicationPad2d(2),
                                          nn.Conv2d(input_size[0], 10, 5),
                                          nn.Dropout2d(), nn.MaxPool2d(2),
                                          nn.ReLU(), nn.BatchNorm2d(10),
                                          nn.ReplicationPad2d(1),
                                          nn.Conv2d(10, 20, 3), nn.Dropout2d(),
                                          nn.MaxPool2d(2), nn.ReLU())
        self.collection12 = nn.Sequential(nn.BatchNorm2d(1),
                                          nn.ReplicationPad2d(2),
                                          nn.Conv2d(input_size[0], 50, 5),
                                          nn.Dropout2d(), nn.MaxPool2d(2),
                                          nn.ReLU(), nn.BatchNorm2d(50),
                                          nn.ReplicationPad2d(1),
                                          nn.Conv2d(50, 100, 3),
                                          nn.Dropout2d(), nn.MaxPool2d(2),
                                          nn.ReLU())
        self.collection13 = nn.Sequential(nn.BatchNorm2d(1),
                                          nn.ReplicationPad2d(1),
                                          nn.Conv2d(1, 70, (1, 7)),
                                          nn.Dropout2d(), nn.MaxPool2d(2),
                                          nn.ReLU(), nn.BatchNorm2d(70),
                                          nn.ReplicationPad2d(1),
                                          nn.Conv2d(70, 140, (1, 5)),
                                          nn.Dropout2d(), nn.MaxPool2d(2),
                                          nn.ReLU(), nn.BatchNorm2d(140),
                                          nn.ReplicationPad2d(1),
                                          nn.Conv2d(140, 200, 3),
                                          nn.Dropout2d(), nn.MaxPool2d(2),
                                          nn.ReLU())
        self.collection21 = nn.Sequential(
            nn.Linear(3000, 50),
            nn.ReLU(),
            nn.Dropout2d(),
        )
        self.collection22 = nn.Sequential(
            nn.Linear(800, 50),
            nn.ReLU(),
            nn.Dropout2d(),
        )
        self.collection23 = nn.Sequential(
            nn.Linear(400, 50),
            nn.ReLU(),
            nn.Dropout2d(),
        )
        self.collection31 = nn.Sequential(
            nn.BatchNorm1d(4),
            nn.ReLU(),
            nn.Dropout2d(),
        )
        self.collection32 = nn.Sequential(
            nn.BatchNorm1d(1),
            nn.ReLU(),
            nn.Dropout2d(),
        )
        self.collection41 = nn.Sequential(
            nn.Linear(3 * 50 + 4 + 1, number_of_classes),
            nn.ReLU(),
            nn.Softmax(dim=1),
        )
        self.collection42 = nn.Threshold(0.9, 0)

        self.collection43 = nn.ModuleList()  # ModuleList so the per-class heads are registered as submodules
        for i in range(number_of_classes):
            self.collection43.append(
                nn.Sequential(nn.Linear(1, 50), nn.ReLU(), nn.Linear(50, 1),
                              nn.ReLU()))
Example #10
    def __init__(self, im_size, max_depth):
        super(UniversalAutoencoder, self).__init__()
        im_height, im_width, im_depth = im_size
        base_depth = 64
        depth_mult = 1
        kernel_size = 3
        original_im_width = im_width
        original_im_height = im_height
        original_im_depth = im_depth

        # Encoder sequential generation
        layers = [
            nn.ReplicationPad2d(
                (math.floor(kernel_size / 2), math.floor(kernel_size / 2),
                 math.floor(kernel_size / 2), math.floor(kernel_size / 2))),
            nn.Conv2d(im_depth, base_depth, kernel_size, 2),
            nn.ReLU(inplace=True)
        ]
        im_width = math.floor(
            (im_width + 2 * math.floor(kernel_size / 2) - kernel_size) / 2 + 1)
        im_height = math.floor(
            (im_height + 2 * math.floor(kernel_size / 2) - kernel_size) / 2 +
            1)
        im_depth = base_depth * depth_mult
        while im_width != 2:
            if im_depth < max_depth:
                depth_mult *= 2
            layers.extend([
                nn.ReplicationPad2d(
                    (math.floor(kernel_size / 2), math.floor(kernel_size / 2),
                     math.floor(kernel_size / 2),
                     math.floor(kernel_size / 2))),
                nn.Conv2d(im_depth, base_depth * depth_mult, kernel_size, 2),
                nn.BatchNorm2d(base_depth * depth_mult),
                nn.ReLU(inplace=True)
            ])
            im_width = math.floor((im_width + 2 * math.floor(kernel_size / 2) -
                                   kernel_size) / 2 + 1)
            im_height = math.floor(
                (im_height + 2 * math.floor(kernel_size / 2) - kernel_size) /
                2 + 1)
            im_depth = base_depth * depth_mult
        layers.extend([
            nn.ReplicationPad2d(
                (math.floor(kernel_size / 2), math.floor(kernel_size / 2),
                 math.floor(kernel_size / 2), math.floor(kernel_size / 2))),
            nn.Conv2d(im_depth, base_depth * depth_mult, kernel_size, 2),
            nn.ReLU(inplace=True)
        ])
        self.encoder = nn.Sequential(*layers)

        #Decoder sequential generation
        layers_num = round(math.log(original_im_width, 2))
        channel_num = [original_im_depth, base_depth]
        channel_num.extend(
            [base_depth * i for i in (2**j for j in range(1, layers_num))])
        channel_num = [min(i, max_depth) for i in channel_num]
        print(channel_num)
        channel_num = channel_num[::-1]
        layers = []
        for i in range(len(channel_num) - 2):
            layers.extend([
                nn.Upsample(scale_factor=2),
                nn.ReplicationPad2d(
                    (math.floor(kernel_size / 2), math.floor(kernel_size / 2),
                     math.floor(kernel_size / 2),
                     math.floor(kernel_size / 2))),
                nn.Conv2d(channel_num[i], channel_num[i + 1], kernel_size, 1),
                nn.BatchNorm2d(channel_num[i + 1]),
                nn.LeakyReLU(inplace=True, negative_slope=0.2)
            ])
        layers.extend([
            nn.Upsample(scale_factor=2),
            nn.ReplicationPad2d(
                (math.floor(kernel_size / 2), math.floor(kernel_size / 2),
                 math.floor(kernel_size / 2), math.floor(kernel_size / 2))),
            nn.Conv2d(channel_num[-2], channel_num[-1], kernel_size, 1),
            nn.Tanh()
        ])
        self.decoder = nn.Sequential(*layers)
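Each encoder stage above halves the spatial size according to floor((W + 2*floor(k/2) - k) / 2 + 1), and the while loop keeps stacking stages until the width reaches 2; a small check of that size bookkeeping for kernel_size = 3 and an assumed 128-pixel input:

import math

kernel_size, w = 3, 128
sizes = []
while w != 2:
    w = math.floor((w + 2 * math.floor(kernel_size / 2) - kernel_size) / 2 + 1)
    sizes.append(w)
print(sizes)  # [64, 32, 16, 8, 4, 2]: six downsampling stages for a 128-wide image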
Example #11
    def __init__(self,
                 input_nc,
                 outer_nc,
                 inner_nc,
                 nz=0,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 norm_layer=None,
                 nl_layer=None,
                 use_dropout=False,
                 upsample='basic',
                 padding_type='zero'):
        super(UnetBlock_with_z, self).__init__()
        p = 0
        downconv = []
        if padding_type == 'reflect':
            downconv += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            downconv += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        self.outermost = outermost
        self.innermost = innermost
        self.nz = nz
        input_nc = input_nc + nz
        downconv += [
            nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=p)
        ]
        # downsample is different from upsample
        downrelu = nn.LeakyReLU(0.2, True)
        uprelu = nl_layer()

        if outermost:
            upconv = upsampleLayer(inner_nc * 2,
                                   outer_nc,
                                   upsample=upsample,
                                   padding_type=padding_type)
            down = downconv
            up = [uprelu] + upconv + [nn.Tanh()]
        elif innermost:
            upconv = upsampleLayer(inner_nc,
                                   outer_nc,
                                   upsample=upsample,
                                   padding_type=padding_type)
            down = [downrelu] + downconv
            up = [uprelu] + upconv
            if norm_layer is not None:
                up += [norm_layer(outer_nc)]
        else:
            upconv = upsampleLayer(inner_nc * 2,
                                   outer_nc,
                                   upsample=upsample,
                                   padding_type=padding_type)
            down = [downrelu] + downconv
            if norm_layer is not None:
                down += [norm_layer(inner_nc)]
            up = [uprelu] + upconv

            if norm_layer is not None:
                up += [norm_layer(outer_nc)]

            if use_dropout:
                up += [nn.Dropout(0.5)]
        self.down = nn.Sequential(*down)
        self.submodule = submodule
        self.up = nn.Sequential(*up)
Example #12
    def __init__(self, number_of_classes=2, dropout_rate=0.5, name=None, type='Classifier'):
        super(RapidEClassifier, self).__init__()
        self.number_of_classes = number_of_classes
        self.dropout_rate = dropout_rate
        self.features = ["Scatter", "Spectrum", "Lifetime 1", "Lifetime 2", "Size"]
        self.name = name
        self.type = type

        self.scatterConv1 = nn.Sequential(
            nn.ReplicationPad2d(2),
            nn.Conv2d(1, 10, 5), nn.Dropout2d(dropout_rate), nn.MaxPool2d(2), nn.ReLU()
        )

        self.batchNormScatter = nn.BatchNorm2d(10)

        self.scatterConv2 = nn.Sequential(
            nn.ReplicationPad2d(1),
            nn.Conv2d(10, 20, 3), nn.Dropout2d(dropout_rate), nn.MaxPool2d(2), nn.ReLU()
        )

        self.spectrumnConv1 = nn.Sequential(
            nn.ReplicationPad2d(2),
            nn.Conv2d(1, 50, 5), nn.Dropout2d(dropout_rate), nn.MaxPool2d(2), nn.ReLU()
        )

        self.batchNormSpectrum = nn.BatchNorm2d(50)

        self.spectrumnConv2 = nn.Sequential(
            nn.ReplicationPad2d(1),
            nn.Conv2d(50, 100, 3), nn.Dropout2d(dropout_rate), nn.MaxPool2d(2), nn.ReLU()
        )

        self.lifetimeConv1 = nn.Sequential(
            nn.ReplicationPad2d(1),
            nn.Conv2d(1, 70, (1, 7)), nn.Dropout2d(dropout_rate), nn.MaxPool2d(2), nn.ReLU()
        )

        self.batchNormLifetime1 = nn.BatchNorm2d(70)

        self.lifetimeConv2 = nn.Sequential(
            nn.ReplicationPad2d(1),
            nn.Conv2d(70, 140, (1, 5)), nn.Dropout2d(dropout_rate), nn.MaxPool2d(2), nn.ReLU()
        )

        self.batchNormLifetime2 = nn.BatchNorm2d(140)

        self.lifetimeConv3 = nn.Sequential(
            nn.ReplicationPad2d(1),
            nn.Conv2d(140, 200, 3), nn.Dropout2d(dropout_rate), nn.MaxPool2d(2), nn.ReLU()
        )

        # FC layers

        self.batchNormFCScatter = nn.BatchNorm1d(3000)
        self.batchNormFCSpectrum = nn.BatchNorm1d(800)
        self.batchNormFCLifetime = nn.BatchNorm1d(400)

        self.FCScatter = nn.Sequential(
            nn.Linear(3000, 50), nn.ReLU(), nn.Dropout2d(dropout_rate),
        )
        self.FCSpectrum = nn.Sequential(
            nn.Linear(800, 50), nn.ReLU(), nn.Dropout2d(dropout_rate),
        )
        self.FCLifetime1 = nn.Sequential(
            nn.Linear(400, 50), nn.ReLU(), nn.Dropout2d(dropout_rate),

        )

        self.FCLifetime2 = nn.Sequential(
            nn.ReLU(), nn.Dropout2d(dropout_rate)
        )
        self.FCSize = nn.Sequential(
            nn.ReLU(), nn.Dropout2d(dropout_rate)
        )

        self.batchNormFinal = nn.BatchNorm1d(155)

        self.FCFinal = nn.Sequential(
            nn.Linear(155, number_of_classes), nn.ReLU(),
            nn.Softmax(dim=1)

        )
Example #13
 def __append_layer(self, net_style, args_dict):
     args_values_list = list(args_dict.values())
     if net_style == "Conv2d":
         self.layers.append(
             nn.Conv2d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "MaxPool2d":
         self.layers.append(
             nn.MaxPool2d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "Linear":
         self.layers.append(
             nn.Linear(args_values_list[0], args_values_list[1],
                       args_values_list[2]))
     elif net_style == "reshape":
          # Special case: for "reshape", append the target tensor shape directly
         # print(type(args_values_list[0]))
         self.layers.append(args_values_list[0])
     elif net_style == "Conv1d":
         self.layers.append(
             nn.Conv1d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "Conv3d":
         self.layers.append(
             nn.Conv3d(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3],
                       args_values_list[4], args_values_list[5],
                       args_values_list[6], args_values_list[7]))
     elif net_style == "ConvTranspose1d":
         self.layers.append(
             nn.ConvTranspose1d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "ConvTranspose2d":
         self.layers.append(
             nn.ConvTranspose2d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "ConvTranspose3d":
         self.layers.append(
             nn.ConvTranspose3d(args_values_list[0], args_values_list[1],
                                args_values_list[2], args_values_list[3],
                                args_values_list[4], args_values_list[5],
                                args_values_list[6], args_values_list[7],
                                args_values_list[8]))
     elif net_style == "Unfold":
         self.layers.append(
             nn.Unfold(args_values_list[0], args_values_list[1],
                       args_values_list[2], args_values_list[3]))
     elif net_style == "Fold":
         self.layers.append(
             nn.Fold(args_values_list[0], args_values_list[1],
                     args_values_list[2], args_values_list[3],
                     args_values_list[4]))
     elif net_style == "MaxPool1d":
         self.layers.append(
             nn.MaxPool1d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "MaxPool3d":
         self.layers.append(
             nn.MaxPool3d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4], args_values_list[5]))
     elif net_style == "MaxUnpool1d":
         self.layers.append(
             nn.MaxUnpool1d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "MaxUnpool2d":
         self.layers.append(
             nn.MaxUnpool2d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "MaxUnpool3d":
         self.layers.append(
             nn.MaxUnpool3d(args_values_list[0], args_values_list[1],
                            args_values_list[2]))
     elif net_style == "AvgPool1d":
         self.layers.append(
             nn.AvgPool1d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "AvgPool2d":
         self.layers.append(
             nn.AvgPool2d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "AvgPool3d":
         self.layers.append(
             nn.AvgPool3d(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3],
                          args_values_list[4]))
     elif net_style == "FractionalMaxPool2d":
         self.layers.append(
             nn.FractionalMaxPool2d(args_values_list[0],
                                    args_values_list[1],
                                    args_values_list[2],
                                    args_values_list[3],
                                    args_values_list[4]))
     elif net_style == "LPPool1d":
         self.layers.append(
             nn.LPPool1d(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3]))
     elif net_style == "LPPool2d":
         self.layers.append(
             nn.LPPool2d(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3]))
     elif net_style == "AdaptiveMaxPool1d":
         self.layers.append(
             nn.AdaptiveMaxPool1d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveMaxPool2d":
         self.layers.append(
             nn.AdaptiveMaxPool2d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveMaxPool3d":
         self.layers.append(
             nn.AdaptiveMaxPool3d(args_values_list[0], args_values_list[1]))
     elif net_style == "AdaptiveAvgPool1d":
         self.layers.append(nn.AdaptiveAvgPool1d(args_values_list[0]))
     elif net_style == "AdaptiveAvgPool2d":
         self.layers.append(nn.AdaptiveAvgPool2d(args_values_list[0]))
     elif net_style == "AdaptiveAvgPool3d":
         self.layers.append(nn.AdaptiveAvgPool3d(args_values_list[0]))
     elif net_style == "ReflectionPad1d":
         self.layers.append(nn.ReflectionPad1d(args_values_list[0]))
     elif net_style == "ReflectionPad2d":
         self.layers.append(nn.ReflectionPad2d(args_values_list[0]))
     elif net_style == "ReplicationPad1d":
         self.layers.append(nn.ReplicationPad1d(args_values_list[0]))
     elif net_style == "ReplicationPad2d":
         self.layers.append(nn.ReplicationPad2d(args_values_list[0]))
     elif net_style == "ReplicationPad3d":
         self.layers.append(nn.ReplicationPad3d(args_values_list[0]))
     elif net_style == "ZeroPad2d":
         self.layers.append(nn.ZeroPad2d(args_values_list[0]))
     elif net_style == "ConstantPad1d":
         self.layers.append(
             nn.ConstantPad1d(args_values_list[0], args_values_list[1]))
     elif net_style == "ConstantPad2d":
         self.layers.append(
             nn.ConstantPad2d(args_values_list[0], args_values_list[1]))
     elif net_style == "ConstantPad3d":
         self.layers.append(
             nn.ConstantPad3d(args_values_list[0], args_values_list[1]))
     elif net_style == "ELU":
         self.layers.append(nn.ELU(args_values_list[0],
                                   args_values_list[1]))
     elif net_style == "Hardshrink":
         self.layers.append(nn.Hardshrink(args_values_list[0]))
     elif net_style == "Hardtanh":
         self.layers.append(
             nn.Hardtanh(args_values_list[0], args_values_list[1],
                         args_values_list[2], args_values_list[3],
                         args_values_list[4]))
     elif net_style == "LeakyReLU":
         self.layers.append(
             nn.LeakyReLU(args_values_list[0], args_values_list[1]))
     elif net_style == "LogSigmoid":
         self.layers.append(nn.LogSigmoid())
     elif net_style == "PReLU":
         self.layers.append(
             nn.PReLU(args_values_list[0], args_values_list[1]))
     elif net_style == "ReLU":
         self.layers.append(nn.ReLU(args_values_list[0]))
     elif net_style == "ReLU6":
         self.layers.append(nn.ReLU6(args_values_list[0]))
     elif net_style == "RReLU":
         self.layers.append(
             nn.RReLU(args_values_list[0], args_values_list[1],
                      args_values_list[2]))
     elif net_style == "SELU":
         self.layers.append(nn.SELU(args_values_list[0]))
     elif net_style == "CELU":
         self.layers.append(
             nn.CELU(args_values_list[0], args_values_list[1]))
     elif net_style == "Sigmoid":
         self.layers.append(nn.Sigmoid())
     elif net_style == "Softplus":
         self.layers.append(
             nn.Softplus(args_values_list[0], args_values_list[1]))
     elif net_style == "Softshrink":
         self.layers.append(nn.Softshrink(args_values_list[0]))
     elif net_style == "Softsign":
         self.layers.append(nn.Softsign())
     elif net_style == "Tanh":
         self.layers.append(nn.Tanh())
     elif net_style == "Tanhshrink":
         self.layers.append(nn.Tanhshrink())
     elif net_style == "Threshold":
         self.layers.append(
             nn.Threshold(args_values_list[0], args_values_list[1],
                          args_values_list[2]))
     elif net_style == "Softmin":
         self.layers.append(nn.Softmin(args_values_list[0]))
     elif net_style == "Softmax":
         self.layers.append(nn.Softmax(args_values_list[0]))
     elif net_style == "Softmax2d":
         self.layers.append(nn.Softmax2d())
     elif net_style == "LogSoftmax":
         self.layers.append(nn.LogSoftmax(args_values_list[0]))
     elif net_style == "AdaptiveLogSoftmaxWithLoss":
         self.layers.append(
             nn.AdaptiveLogSoftmaxWithLoss(args_values_list[0],
                                           args_values_list[1],
                                           args_values_list[2],
                                           args_values_list[3],
                                           args_values_list[4]))
     elif net_style == "BatchNorm1d":
         self.layers.append(
             nn.BatchNorm1d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "BatchNorm2d":
         self.layers.append(
             nn.BatchNorm2d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "BatchNorm3d":
         self.layers.append(
             nn.BatchNorm3d(args_values_list[0], args_values_list[1],
                            args_values_list[2], args_values_list[3],
                            args_values_list[4]))
     elif net_style == "GroupNorm":
         self.layers.append(
             nn.GroupNorm(args_values_list[0], args_values_list[1],
                          args_values_list[2], args_values_list[3]))
     elif net_style == "InstanceNorm1d":
         self.layers.append(
             nn.InstanceNorm1d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "InstanceNorm2d":
         self.layers.append(
             nn.InstanceNorm2d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "InstanceNorm3d":
         self.layers.append(
             nn.InstanceNorm3d(args_values_list[0], args_values_list[1],
                               args_values_list[2], args_values_list[3],
                               args_values_list[4]))
     elif net_style == "LayerNorm":
         self.layers.append(
             nn.LayerNorm(args_values_list[0], args_values_list[1],
                          args_values_list[2]))
     elif net_style == "LocalResponseNorm":
         self.layers.append(
             nn.LocalResponseNorm(args_values_list[0], args_values_list[1],
                                  args_values_list[2], args_values_list[3]))
     elif net_style == "Linear":
         self.layers.append(
             nn.Linear(args_values_list[0], args_values_list[1],
                       args_values_list[2]))
     elif net_style == "Dropout":
         self.layers.append(
             nn.Dropout(args_values_list[0], args_values_list[1]))
     elif net_style == "Dropout2d":
         self.layers.append(
             nn.Dropout2d(args_values_list[0], args_values_list[1]))
     elif net_style == "Dropout3d":
         self.layers.append(
             nn.Dropout3d(args_values_list[0], args_values_list[1]))
     elif net_style == "AlphaDropout":
         self.layers.append(
             nn.AlphaDropout(args_values_list[0], args_values_list[1]))
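Every branch above unpacks args_dict positionally with list(args_dict.values()), so the dict must list the constructor arguments in their definition order; a small illustration for the ReplicationPad2d and LeakyReLU branches (the dicts here are hypothetical, and the surrounding class is not shown):

import torch.nn as nn

layers = []
for net_style, args_dict in [("ReplicationPad2d", {"padding": 1}),
                             ("LeakyReLU", {"negative_slope": 0.2, "inplace": True})]:
    args_values_list = list(args_dict.values())
    if net_style == "ReplicationPad2d":
        layers.append(nn.ReplicationPad2d(args_values_list[0]))
    elif net_style == "LeakyReLU":
        layers.append(nn.LeakyReLU(args_values_list[0], args_values_list[1]))
print(layers)  # [ReplicationPad2d((1, 1, 1, 1)), LeakyReLU(negative_slope=0.2, inplace=True)]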
Example #14
File: sss_net.py  Project: Shuai-Xie/S3N
    def generate_map(self, input_x, class_response_maps, p):
        N, C, H, W = class_response_maps.size()  # N, 200

        # sort the per-class probabilities in descending order
        # score_pred: sorted probabilities, sort_number: class indices
        score_pred, sort_number = torch.sort(
            F.softmax(F.adaptive_avg_pool2d(class_response_maps, 1),
                      dim=1),  # prob of each class
            dim=1,
            descending=True)

        # estimate sparse attention
        # gate_score = sum(p * log p) over the top-5 classes, i.e. the negative entropy
        gate_score = (score_pred[:, 0:5] * torch.log(score_pred[:, 0:5])).sum(1)

        xs = []
        xs_inv = []

        for idx_i in range(N):  # each img
            # R: response map -> H,W
            if gate_score[idx_i] > -0.2:  # threshold on the top-5 sum; roughly [0.1, 0.3]
                decide_map = class_response_maps[idx_i, sort_number[idx_i, 0], :, :]  # top-1 map
            else:  # use the mean of the top-5 maps instead
                decide_map = class_response_maps[idx_i, sort_number[idx_i, 0:5], :, :].mean(0)  # top-5 mean

            # normalize R
            min_value, max_value = decide_map.min(), decide_map.max()
            decide_map = (decide_map - min_value) / (max_value - min_value)

            # peaks, local max
            peak_list, aggregation = peak_stimulation(decide_map,
                                                      win_size=3,
                                                      peak_filter=_mean_filter)
            decide_map = decide_map.squeeze(0).squeeze(0)  # 1,1,H,W

            score = [decide_map[item[2], item[3]] for item in peak_list]
            x = [item[3] for item in peak_list]
            y = [item[2] for item in peak_list]

            if score == []:
                temp = torch.zeros(1, 1, self.grid_size, self.grid_size).cuda()
                temp += self.base_ratio
                xs.append(temp)
                # xs_soft.append(temp)
                xs_inv.append(temp)
                continue

            peak_num = torch.arange(len(score))

            temp = self.base_ratio
            temp_w = self.base_ratio

            if p == 0:
                for i in peak_num:
                    temp += score[i] * kernel_generate(
                        self.radius(torch.sqrt(score[i])), H,
                        (x[i].item(),
                         y[i].item())).unsqueeze(0).unsqueeze(0).cuda()
                    temp_w += 1 / score[i] * \
                              kernel_generate(self.radius_inv(torch.sqrt(score[i])), H, (x[i].item(), y[i].item())).unsqueeze(0).unsqueeze(0).cuda()
            elif p == 1:
                for i in peak_num:
                    rd = random.uniform(0, 1)
                    if score[i] > rd:
                        temp += score[i] * kernel_generate(
                            self.radius(torch.sqrt(score[i])), H,
                            (x[i].item(),
                             y[i].item())).unsqueeze(0).unsqueeze(0).cuda()
                    else:
                        temp_w += 1 / score[i] * \
                                  kernel_generate(self.radius_inv(torch.sqrt(score[i])), H, (x[i].item(), y[i].item())).unsqueeze(0).unsqueeze(0).cuda()
            elif p == 2:
                index = score.index(max(score))
                temp += score[index] * kernel_generate(
                    self.radius(score[index]), H,
                    (x[index].item(),
                     y[index].item())).unsqueeze(0).unsqueeze(0).cuda()

                index = score.index(min(score))
                temp_w += 1 / score[index] * \
                          kernel_generate(self.radius_inv(torch.sqrt(score[index])), H, (x[index].item(), y[index].item())).unsqueeze(0).unsqueeze(0).cuda()

            if type(temp) == float:
                temp += torch.zeros(1, 1, self.grid_size,
                                    self.grid_size).cuda()
            xs.append(temp)

            if type(temp_w) == float:
                temp_w += torch.zeros(1, 1, self.grid_size,
                                      self.grid_size).cuda()
            xs_inv.append(temp_w)

        xs = torch.cat(xs, 0)
        xs_hm = nn.ReplicationPad2d(self.padding_size)(xs)
        grid = self.create_grid(xs_hm).to(input_x.device)
        x_sampled_zoom = F.grid_sample(input_x, grid)

        xs_inv = torch.cat(xs_inv, 0)
        xs_hm_inv = nn.ReplicationPad2d(self.padding_size)(xs_inv)
        grid_inv = self.create_grid(xs_hm_inv).to(input_x.device)
        x_sampled_inv = F.grid_sample(input_x, grid_inv)

        return x_sampled_zoom, x_sampled_inv
Example #15
File: image.py  Project: kintatta/d3rl
 def _setup(self, x):
     height, width = x.shape[-2:]
     self._operation = nn.Sequential(nn.ReplicationPad2d(self.shift_size),
                                     aug.RandomCrop((height, width)))
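Padding with ReplicationPad2d(shift_size) and then taking a random crop of the original height and width implements a random image shift of up to shift_size pixels, with the exposed border filled by edge replication (aug above is an external augmentation module not shown here). A torch-only sketch of the same idea, applying one shift to the whole batch for brevity:

import torch
import torch.nn as nn

def random_shift(x, shift_size=4):
    n, c, h, w = x.shape
    padded = nn.ReplicationPad2d(shift_size)(x)
    top = torch.randint(0, 2 * shift_size + 1, (1,)).item()
    left = torch.randint(0, 2 * shift_size + 1, (1,)).item()
    return padded[:, :, top:top + h, left:left + w]

x = torch.randn(8, 3, 84, 84)
print(random_shift(x).shape)  # torch.Size([8, 3, 84, 84]); content shifted by up to 4 px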
Example #16
    def __init__(self,
                 input_dim,
                 output_dim,
                 kernel_size,
                 stride,
                 padding=0,
                 norm='none',
                 activation='relu',
                 pad_type='zero',
                 style_dim=3,
                 norm_after_conv='ln'):
        super().__init__()
        self.use_bias = True
        self.norm_type = norm
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        #self.pad = nn.Identity()

        # initialize normalization
        self.compute_kernel = True if norm == 'conv_kernel' else False
        self.WCT = True if norm == 'WCT' else False

        norm_dim = output_dim

        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'WCT':
            self.norm = nn.InstanceNorm2d(norm_dim)
            self.style_dim = style_dim
            self.dim = output_dim, input_dim, kernel_size, kernel_size
            self.output_dim = output_dim
            self.stride = stride
            self.mlp_W = nn.Sequential(
                nn.Linear(self.style_dim, output_dim**2), )
            self.mlp_bias = nn.Sequential(
                nn.Linear(self.style_dim, output_dim), )
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        elif norm == 'conv_kernel':
            self.style_dim = style_dim
            self.norm_after_conv = norm_after_conv
            self._get_norm(self.norm_after_conv, norm_dim)
            self.dim = output_dim, input_dim, kernel_size, kernel_size
            self.stride = stride
            self.mlp_kernel = nn.Linear(self.style_dim, int(np.prod(self.dim)))
            self.mlp_bias = nn.Linear(self.style_dim, output_dim)
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # initialize convolution
        if norm == 'sn':
            self.conv = SpectralNorm(
                nn.Conv2d(input_dim,
                          output_dim,
                          kernel_size,
                          stride,
                          bias=self.use_bias))
        else:
            self.conv = nn.Conv2d(input_dim,
                                  output_dim,
                                  kernel_size,
                                  stride,
                                  bias=self.use_bias)

        self.style = None
Example #17
    def __init__(
        self,
        x_channels: int,
        hidden_channels,
        depth=2,
        gru_kernel_size=1,
        ortho_init=True,
        instance_norm=False,
        dense_connection=0,
        replication_padding=False,
    ):
        super().__init__()
        self.depth = depth
        self.x_channels = x_channels
        self.hidden_channels = hidden_channels
        self.instance_norm = instance_norm
        self.dense_connection = dense_connection
        self.repl_pad = replication_padding

        self.reset_gates = nn.ModuleList([])
        self.update_gates = nn.ModuleList([])
        self.out_gates = nn.ModuleList([])
        self.conv_blocks = nn.ModuleList([])

        # Create convolutional blocks of RIM cell
        for idx in range(depth + 1):
            in_ch = (x_channels + 2 if idx == 0 else
                     (1 + min(idx, dense_connection)) * hidden_channels)
            out_ch = hidden_channels if idx < depth else x_channels
            pad = 0 if replication_padding else (2 if idx == 0 else 1)
            block = []
            if replication_padding:
                if idx == 1:
                    block.append(nn.ReplicationPad2d(2))
                else:
                    block.append(nn.ReplicationPad2d(2 if idx == 0 else 1))
            block.append(
                nn.Conv2d(
                    in_ch,
                    out_ch,
                    5 if idx == 0 else 3,
                    dilation=(2 if idx == 1 else 1),
                    padding=pad,
                ))
            self.conv_blocks.append(nn.Sequential(*block))

        # Create GRU blocks of RIM cell
        for idx in range(depth):
            for gru_part in [
                    self.reset_gates, self.update_gates, self.out_gates
            ]:
                block = []
                if instance_norm:
                    block.append(nn.InstanceNorm2d(2 * hidden_channels))
                block.append(
                    nn.Conv2d(
                        2 * hidden_channels,
                        hidden_channels,
                        gru_kernel_size,
                        padding=gru_kernel_size // 2,
                    ))
                gru_part.append(nn.Sequential(*block))

        if ortho_init:
            for reset_gate, update_gate, out_gate in zip(
                    self.reset_gates, self.update_gates, self.out_gates):
                nn.init.orthogonal_(reset_gate[-1].weight)
                nn.init.orthogonal_(update_gate[-1].weight)
                nn.init.orthogonal_(out_gate[-1].weight)
                nn.init.constant_(reset_gate[-1].bias, -1.0)
                nn.init.constant_(update_gate[-1].bias, 0.0)
                nn.init.constant_(out_gate[-1].bias, 0.0)
Example #18
    def __init__(self,
                 n_planes,
                 factor,
                 kernel_type,
                 phase=0,
                 kernel_width=None,
                 support=None,
                 sigma=None,
                 preserve_size=False):
        super(Downsampler, self).__init__()

        assert phase in [0, 0.5], 'phase should be 0 or 0.5'

        if kernel_type == 'lanczos2':
            support = 2
            kernel_width = 4 * factor + 1
            kernel_type_ = 'lanczos'

        elif kernel_type == 'lanczos3':
            support = 3
            kernel_width = 6 * factor + 1
            kernel_type_ = 'lanczos'

        elif kernel_type == 'gauss12':
            kernel_width = 7
            sigma = 1 / 2
            kernel_type_ = 'gauss'

        elif kernel_type == 'gauss1sq2':
            kernel_width = 9
            sigma = 1. / np.sqrt(2)
            kernel_type_ = 'gauss'

        elif kernel_type in ['lanczos', 'gauss', 'box']:
            kernel_type_ = kernel_type

        else:
            assert False, 'wrong name kernel'

        # note that `kernel width` will be different to actual size for phase = 1/2
        self.kernel = get_kernel(factor,
                                 kernel_type_,
                                 phase,
                                 kernel_width,
                                 support=support,
                                 sigma=sigma)

        downsampler = nn.Conv2d(n_planes,
                                n_planes,
                                kernel_size=self.kernel.shape,
                                stride=factor,
                                padding=0)
        downsampler.weight.data[:] = 0
        downsampler.bias.data[:] = 0

        kernel_torch = torch.from_numpy(self.kernel)
        for i in range(n_planes):
            downsampler.weight.data[i, i] = kernel_torch

        self.downsampler_ = downsampler

        if preserve_size:

            if self.kernel.shape[0] % 2 == 1:
                pad = int((self.kernel.shape[0] - 1) / 2.)
            else:
                pad = int((self.kernel.shape[0] - factor) / 2.)

            self.padding = nn.ReplicationPad2d(pad)

        self.preserve_size = preserve_size
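With preserve_size=True, the replication padding computed above lets the strided blur convolution keep the output at roughly input_size / factor. A minimal standalone sketch of the same depthwise fixed-kernel downsampler, using a 2x2 box kernel as a stand-in for get_kernel (which is not shown above):

import torch
import torch.nn as nn

factor, n_planes = 2, 3
kernel = torch.full((2, 2), 0.25)  # box kernel; stand-in for get_kernel(...)

downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=2, stride=factor, padding=0, bias=False)
downsampler.weight.data.zero_()
for i in range(n_planes):
    downsampler.weight.data[i, i] = kernel  # each plane is filtered only by its own kernel

pad = nn.ReplicationPad2d(int((kernel.shape[0] - factor) / 2.))  # 0 here, per the even-kernel branch
x = torch.randn(1, n_planes, 64, 64)
print(downsampler(pad(x)).shape)  # torch.Size([1, 3, 32, 32])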
Example #19
    def __init__(self, n_class, code_size, channels=3):
        super(DSN, self).__init__()
        #input_image 32*32
        self.channels = channels
        self.private_enc_src = nn.Sequential()
        self.private_enc_tgt = nn.Sequential()
        self.shared_enc = nn.Sequential()
        self.shared_dec = nn.Sequential()
        self.classifier = nn.Sequential()

        ##PRIVATE ENCODER for source
        self.private_enc_src.add_module(
            'pri_enc_conv1',
            nn.Conv2d(in_channels=channels,
                      out_channels=32,
                      kernel_size=5,
                      padding=2))
        self.private_enc_src.add_module('pri_enc_relu1', nn.ReLU())
        self.private_enc_src.add_module('pri_enc_bn1', nn.BatchNorm2d(32))
        self.private_enc_src.add_module('pri_enc_pool1',
                                        nn.MaxPool2d(kernel_size=2,
                                                     stride=2))  #32*16*16

        self.private_enc_src.add_module(
            'pri_enc_conv2',
            nn.Conv2d(in_channels=32,
                      out_channels=64,
                      kernel_size=5,
                      padding=2))
        self.private_enc_src.add_module('pri_enc_relu2', nn.ReLU())
        self.private_enc_src.add_module('pri_enc_bn2', nn.BatchNorm2d(64))
        self.private_enc_src.add_module('pri_enc_pool2',
                                        nn.MaxPool2d(kernel_size=2,
                                                     stride=2))  #64*8*8
        #reshape
        self.private_fc_src = nn.Sequential(nn.Linear(64 * 8 * 8, code_size),
                                            nn.ReLU(),
                                            nn.BatchNorm1d(code_size))

        ##PRIVATE ENCODER for target
        self.private_enc_tgt.add_module(
            'pri_enc_conv1_1',
            nn.Conv2d(in_channels=channels,
                      out_channels=32,
                      kernel_size=5,
                      padding=2))
        self.private_enc_tgt.add_module('pri_enc_relu1_1', nn.ReLU())
        self.private_enc_tgt.add_module('pri_enc_bn1_1', nn.BatchNorm2d(32))
        self.private_enc_tgt.add_module('pri_enc_pool1_1',
                                        nn.MaxPool2d(kernel_size=2,
                                                     stride=2))  # 32*16*16

        self.private_enc_tgt.add_module(
            'pri_enc_conv2_1',
            nn.Conv2d(in_channels=32,
                      out_channels=64,
                      kernel_size=5,
                      padding=2))
        self.private_enc_tgt.add_module('pri_enc_relu2_1', nn.ReLU())
        self.private_enc_tgt.add_module('pri_enc_bn2_1', nn.BatchNorm2d(64))
        self.private_enc_tgt.add_module('pri_enc_pool2_1',
                                        nn.MaxPool2d(kernel_size=2,
                                                     stride=2))  # 64*8*8
        # reshape
        self.private_fc_tgt = nn.Sequential(nn.Linear(64 * 8 * 8, code_size),
                                            nn.ReLU(),
                                            nn.BatchNorm1d(code_size))

        ##SHARED_ENCODER
        self.shared_enc.add_module(
            'shd_enc_conv1',
            nn.Conv2d(in_channels=channels,
                      out_channels=64,
                      kernel_size=5,
                      padding=2))
        self.shared_enc.add_module('shd_enc_relu1', nn.ReLU())
        self.shared_enc.add_module('shd_enc_pool1',
                                   nn.MaxPool2d(kernel_size=3,
                                                stride=2,
                                                padding=1))  # 64*16*16

        self.shared_enc.add_module(
            'shd_enc_conv2',
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=5,
                      padding=2))
        self.shared_enc.add_module('shd_enc_relu2', nn.ReLU())
        self.shared_enc.add_module('shd_enc_pool2',
                                   nn.MaxPool2d(kernel_size=3,
                                                stride=2,
                                                padding=1))  #64*8*8
        # reshape
        self.shd_enc_fc = nn.Sequential(nn.Linear(64 * 8 * 8, code_size),
                                        nn.ReLU())
        # DOMAIN CLASSIFIER
        self.domain_dis = nn.Sequential(nn.Linear(code_size, 100), nn.ReLU(),
                                        nn.Linear(100, 2))

        # LABEL CLASSIFIER
        self.classifier = nn.Sequential(nn.Linear(code_size, 2048), nn.ReLU(),
                                        nn.Linear(2048, n_class))

        ##SHARED_DECODER
        self.shd_dec_fc = nn.Sequential(nn.Linear(code_size, 300), nn.ReLU(),
                                        nn.BatchNorm1d(300))  # 1d: input is (B, 300)
        # reshape b*3*10*10
        self.shared_dec.add_module('shd_dec_conv1',
                                   nn.Conv2d(in_channels=3,
                                             out_channels=16,
                                             kernel_size=5,
                                             padding=2))  #3*10*10
        self.shared_dec.add_module('shd_dec_relu1', nn.ReLU())
        self.shared_dec.add_module('shd_dec_bn1', nn.BatchNorm2d(16))

        self.shared_dec.add_module(
            'shd_dec_conv2',
            nn.Conv2d(in_channels=16,
                      out_channels=16,
                      kernel_size=5,
                      padding=2))
        self.shared_dec.add_module('shd_dec_relu2', nn.ReLU())
        self.shared_dec.add_module('shd_dec_bn2', nn.BatchNorm2d(16))
        self.shared_dec.add_module('shd_dec_Up2', nn.Upsample([30,
                                                               30]))  #16*30*30
        self.shared_dec.add_module('shd_dec_Up2_2',
                                   nn.ReplicationPad2d([1, 1, 1, 1]))  #16*32*32

        self.shared_dec.add_module(
            'shd_dec_conv3',
            nn.Conv2d(in_channels=16,
                      out_channels=16,
                      kernel_size=3,
                      padding=1))
        self.shared_dec.add_module('shd_dec_relu3', nn.ReLU())
        self.shared_dec.add_module('shd_dec_bn3', nn.BatchNorm2d(16))
        self.shared_dec.add_module(
            'shd_dec_conv4',
            nn.Conv2d(in_channels=16,
                      out_channels=channels,
                      kernel_size=3,
                      padding=1))
        self.shared_dec.add_module('shd_dec_bn4', nn.BatchNorm2d(channels))
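        # Note (not in the original snippet): for 32x32 inputs the source branch
        # would flow roughly as
        #   feat = self.private_enc_src(x)                        # (B, 64, 8, 8)
        #   private_code = self.private_fc_src(feat.view(feat.size(0), -1))
        #   shared_code = self.shd_enc_fc(self.shared_enc(x).view(x.size(0), -1))
        #   logits = self.classifier(shared_code)
        #   domain_logits = self.domain_dis(shared_code)
        #   recon = self.shared_dec(
        #       self.shd_dec_fc(shared_code + private_code).view(-1, 3, 10, 10))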
Example #20
            print('Stage one : Pretrain the Spc_UpNet.')
            Spc_up = Spc_UpNet(im_h)
            H_RGB = Spc_up(torch.unsqueeze(im_m, 0))
            print('Stage two : Pretrain the Spa_UpNet.')
            Spa_up = Spa_UpNet(torch.squeeze(H_RGB, 0))
            H_HSI = Spa_up(torch.unsqueeze(im_h, 0))
            net_input = Variable(0.8 * H_RGB + 0.2 * H_HSI).cuda()
        else:
            net_input = Variable(torch.unsqueeze(torch.rand_like(im_gt),
                                                 0)).cuda()

        if U_spa == 1:

            #Learnable spatial downsampler
            KS = 32
            dow = nn.Sequential(nn.ReplicationPad2d(int((KS - factor) / 2.)),
                                nn.Conv2d(1, 1, KS, factor))
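            # Note (added): with (KS - factor) / 2 pixels of replication padding
            # on every side, the KSxKS stride-`factor` convolution maps H to
            # H / factor (e.g. KS=32, factor=4 gives 14 pixels of padding).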

            class Apply(nn.Module):
                def __init__(self, what, dim, *args):
                    super(Apply, self).__init__()
                    self.dim = dim
                    self.what = what

                def forward(self, input):
                    inputs = []
                    for i in range(input.size(self.dim)):
                        inputs.append(self.what(input.narrow(self.dim, i, 1)))
                    return torch.cat(inputs, dim=self.dim)
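                # Note (added): Apply runs `what` on each length-1 slice of the
                # input along `dim` and concatenates the results, so the
                # single-channel downsampler defined above can be applied to
                # every band of a multi-channel input.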

                def __len__(self):
Example #21
    def __init__(self, nc, ngf, ndf, latent_variable_size):
        super(VAE, self).__init__()
        #self.cuda = True
        self.nc = nc
        self.ngf = ngf
        self.ndf = ndf
        self.latent_variable_size = latent_variable_size

        # encoder
        self.e1 = nn.Conv2d(nc, ndf, 4, 2, 1)
        self.bn1 = nn.BatchNorm2d(ndf)

        self.e2 = nn.Conv2d(ndf, ndf * 2, 4, 2, 1)
        self.bn2 = nn.BatchNorm2d(ndf * 2)

        self.e3 = nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1)
        self.bn3 = nn.BatchNorm2d(ndf * 4)

        self.e4 = nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1)
        self.bn4 = nn.BatchNorm2d(ndf * 8)

        self.e5 = nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1)
        self.bn5 = nn.BatchNorm2d(ndf * 16)

        self.e6 = nn.Conv2d(ndf * 16, ndf * 32, 4, 2, 1)
        self.bn6 = nn.BatchNorm2d(ndf * 32)

        self.e7 = nn.Conv2d(ndf * 32, ndf * 64, 4, 2, 1)
        self.bn7 = nn.BatchNorm2d(ndf * 64)

        self.fc1 = nn.Linear(ndf * 64 * 4 * 4, latent_variable_size)
        self.fc2 = nn.Linear(ndf * 64 * 4 * 4, latent_variable_size)

        # decoder
        self.d1 = nn.Linear(latent_variable_size, ngf * 64 * 4 * 4)

        self.up1 = nn.UpsamplingNearest2d(scale_factor=2)
        self.pd1 = nn.ReplicationPad2d(1)
        self.d2 = nn.Conv2d(ngf * 64, ngf * 32, 3, 1)
        self.bn8 = nn.BatchNorm2d(ngf * 32, 1.e-3)

        self.up2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.pd2 = nn.ReplicationPad2d(1)
        self.d3 = nn.Conv2d(ngf * 32, ngf * 16, 3, 1)
        self.bn9 = nn.BatchNorm2d(ngf * 16, 1.e-3)

        self.up3 = nn.UpsamplingNearest2d(scale_factor=2)
        self.pd3 = nn.ReplicationPad2d(1)
        self.d4 = nn.Conv2d(ngf * 16, ngf * 8, 3, 1)
        self.bn10 = nn.BatchNorm2d(ngf * 8, 1.e-3)

        self.up4 = nn.UpsamplingNearest2d(scale_factor=2)
        self.pd4 = nn.ReplicationPad2d(1)
        self.d5 = nn.Conv2d(ngf * 8, ngf * 4, 3, 1)
        self.bn11 = nn.BatchNorm2d(ngf * 4, 1.e-3)

        self.up5 = nn.UpsamplingNearest2d(scale_factor=2)
        self.pd5 = nn.ReplicationPad2d(1)
        self.d6 = nn.Conv2d(ngf * 4, ngf * 2, 3, 1)
        self.bn12 = nn.BatchNorm2d(ngf * 2, 1.e-3)

        self.up6 = nn.UpsamplingNearest2d(scale_factor=2)
        self.pd6 = nn.ReplicationPad2d(1)
        self.d7 = nn.Conv2d(ngf * 2, ngf, 3, 1)
        self.bn13 = nn.BatchNorm2d(ngf, 1.e-3)

        self.up7 = nn.UpsamplingNearest2d(scale_factor=2)
        self.pd7 = nn.ReplicationPad2d(1)
        self.d8 = nn.Conv2d(ngf, nc, 3, 1)

        self.leakyrelu = nn.LeakyReLU(0.2)
        self.relu = nn.ReLU()
        #self.sigmoid = nn.Sigmoid()
        self.maxpool = nn.MaxPool2d((2, 2), (2, 2))
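        # Note (added): each decoder stage repeats the same pattern: nearest-
        # neighbour 2x upsampling, ReplicationPad2d(1), then an unpadded 3x3
        # stride-1 convolution, so the size set by the upsampling is preserved
        # while border values are replicated rather than zero-filled.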
Example #22
 def __init__(self, in_channels, out_channels, shape):
     super(UpConv, self).__init__()
     self.upsample = nn.Upsample(scale_factor=(2, 2), mode='bilinear')
     self.pad = nn.ReplicationPad2d((0, 1, 0, 1))
     self.conv = nn.Conv2d(in_channels, out_channels, shape, stride=1)
Example #23
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# ==============================================================================
from __future__ import division
import os
import numpy as np
import torch
from torch import optim
from torch.nn.utils import clip_grad_norm_
import kornia.augmentation as aug
import torch.nn as nn
import torch.nn.functional as F
from model.SimSiamModel import DQN

random_shift = nn.Sequential(aug.RandomCrop((80, 80)), nn.ReplicationPad2d(4),
                             aug.RandomCrop((84, 84)))
aug = random_shift
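# Note (added): cropping to 80x80, replication-padding by 4 on every side, and
# re-cropping to 84x84 amounts to a random shift of up to 4 pixels with
# replicated borders, a common augmentation for pixel-based RL observations.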


def D(p, z, version='simplified'):
    if version == 'original':
        z = z.detach()
        p = F.normalize(p, dim=1)
        z = F.normalize(z, dim=1)
        return -(p * z).sum(dim=1).mean()
    else:
        return -F.cosine_similarity(p, z.detach(), dim=-1).mean()


class AgentSimsiam():
Example #24
    def __init__(self,
                 in_channels,
                 out_channels,
                 conv_type,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 pad_type='zero',
                 activation='lrelu',
                 norm='none',
                 sn=False):
        super(Conv2dLayer, self).__init__()
        # Initialize the padding scheme
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # Initialize the normalization type
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_channels)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_channels)
        elif norm == 'ln':
            self.norm = LayerNorm(out_channels)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # Initialize the activation function
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # Initialize the convolution layers
        if sn:
            print("sn")
            self.conv2d = SpectralNorm(
                nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          padding=0,
                          dilation=dilation))
        else:
            if conv_type == 'normal':
                self.conv2d = nn.Conv2d(in_channels,
                                        out_channels,
                                        kernel_size,
                                        stride,
                                        padding=0,
                                        dilation=dilation)
            elif conv_type == 'partial':
                self.conv2d = partialconv2d.PartialConv2d(in_channels,
                                                          out_channels,
                                                          kernel_size,
                                                          stride,
                                                          padding=0,
                                                          dilation=dilation)
            else:
                print("conv_type not implemented")
Example #25
 def __init__(self):
     super(pwc_residual, self).__init__()
     self.pad = nn.ReplicationPad2d(2)
     self.FlowNet = PWCNet.PWCDCNet()
     self.mask = ada_mask(11)
Example #26
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
#   |-- downsampling -- |submodule| -- upsampling --|
class UnetBlock(nn.Module):
    def __init__(self, input_nc, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False,
                 norm_layer=None, nl_layer=None, use_dropout=False, upsample='basic', padding_type='zero'):
        super(UnetBlock, self).__init__()
        self.outermost = outermost
        p = 0
        downconv = []
        if padding_type == 'reflect':
            downconv += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            downconv += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError(
                'padding [%s] is not implemented' % padding_type)
        downconv += [nn.Conv2d(input_nc, inner_nc,
                               kernel_size=4, stride=2, padding=p)]
        # downsample is different from upsample
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc) if norm_layer is not None else None
        uprelu = nl_layer()
        upnorm = norm_layer(outer_nc) if norm_layer is not None else None

        if outermost:
            upconv = upsampleLayer(
Example #27
    def __init__(self, pad_type='zero', path_to_vgg19_weights=None, pretrained=False):
        super(PretrainedVGG19FeatureExtractor, self).__init__()
        self.pad_type = pad_type

        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(1)
            padding = 0
        elif pad_type == 'zero':
            self.pad = EqualLayer()
            padding = 1
        elif pad_type == 'replication':
            self.pad = nn.ReplicationPad2d(1)
            padding = 0
        else:
            raise NotImplementedError

        # vgg modules

        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=padding)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=padding)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=padding)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=padding)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=padding)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=padding)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=padding)
        self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=padding)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=padding)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=padding)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=padding)
        self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=padding)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=padding)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=padding)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=padding)
        self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=padding)

        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)

        for param in self.parameters():
            param.requires_grad = False

        if path_to_vgg19_weights is not None:
            # print(f"Using features from {path_to_vgg19_weights} in Perceptual Loss")
            pretrained_state_dict = torch.load(path_to_vgg19_weights)
        elif pretrained:
            # print("Using pretrained features in Perceptual Loss")
            pretrained_state_dict = vgg19(pretrained=True).features.state_dict()
        else:
            # print("Using random features in Perceptual Loss")
            torch.manual_seed(432)
            torch.cuda.manual_seed(3294820948304)
            np.random.seed(55254354)
            random.seed(543354)
            pretrained_state_dict = vgg19(pretrained=False).features.state_dict()

        state_dict = OrderedDict()
        for (new_name, _), (old_name, value) in zip(self.state_dict().items(), pretrained_state_dict.items()):
            state_dict[new_name] = value

        self.load_state_dict(state_dict)
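        # Note (added): when pad_type is 'reflect' or 'replication' the conv
        # layers are built with padding=0, and self.pad is presumably applied
        # before each convolution in forward() (not shown), so the spatial
        # sizes match the zero-padded (padding=1) configuration.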
Example #28
def lua_recursive_model(module, seq):
    for m in module.modules:
        name = type(m).__name__
        real = m
        if name == 'TorchObject':
            name = m._typename.replace('cudnn.', '')
            m = m._obj

        if name == 'SpatialConvolution':
            if not hasattr(m, 'groups'): m.groups = 1
            n = nn.Conv2d(m.nInputPlane, m.nOutputPlane, (m.kW, m.kH),
                          (m.dW, m.dH), (m.padW, m.padH), 1, m.groups,
                          bias=(m.bias is not None))
            copy_param(m, n)
            add_submodule(seq, n)
        elif name == 'SpatialBatchNormalization':
            n = nn.BatchNorm2d(m.running_mean.size(0), m.eps, m.momentum,
                               m.affine)
            copy_param(m, n)
            add_submodule(seq, n)
        elif name == 'ReLU':
            n = nn.ReLU()
            add_submodule(seq, n)
        elif name == 'SpatialMaxPooling':
            n = nn.MaxPool2d((m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH),
                             ceil_mode=m.ceil_mode)
            add_submodule(seq, n)
        elif name == 'SpatialAveragePooling':
            n = nn.AvgPool2d((m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH),
                             ceil_mode=m.ceil_mode)
            add_submodule(seq, n)
        elif name == 'SpatialUpSamplingNearest':
            n = nn.UpsamplingNearest2d(scale_factor=m.scale_factor)
            add_submodule(seq, n)
        elif name == 'View':
            n = Lambda(lambda x: x.view(x.size(0), -1))
            add_submodule(seq, n)
        elif name == 'Linear':
            # Linear in pytorch only accept 2D input
            n1 = Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x)
            n2 = nn.Linear(m.weight.size(1), m.weight.size(0),
                           bias=(m.bias is not None))
            copy_param(m, n2)
            n = nn.Sequential(n1, n2)
            add_submodule(seq, n)
        elif name == 'Dropout':
            m.inplace = False
            n = nn.Dropout(m.p)
            add_submodule(seq, n)
        elif name == 'SoftMax':
            n = nn.Softmax()
            add_submodule(seq, n)
        elif name == 'Identity':
            n = Lambda(lambda x: x)  # do nothing
            add_submodule(seq, n)
        elif name == 'SpatialFullConvolution':
            n = nn.ConvTranspose2d(m.nInputPlane, m.nOutputPlane, (m.kW, m.kH),
                                   (m.dW, m.dH), (m.padW, m.padH))
            add_submodule(seq, n)
        elif name == 'SpatialReplicationPadding':
            n = nn.ReplicationPad2d((m.pad_l, m.pad_r, m.pad_t, m.pad_b))
            add_submodule(seq, n)
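            # Note (added): nn.ReplicationPad2d takes its padding as
            # (left, right, top, bottom), which matches the Torch7
            # pad_l/pad_r/pad_t/pad_b fields in this order.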
        elif name == 'SpatialReflectionPadding':
            n = nn.ReflectionPad2d((m.pad_l, m.pad_r, m.pad_t, m.pad_b))
            add_submodule(seq, n)
        elif name == 'Copy':
            n = Lambda(lambda x: x)  # do nothing
            add_submodule(seq, n)
        elif name == 'Narrow':
            n = Lambda(
                lambda x, a=(m.dimension, m.index, m.length): x.narrow(*a))
            add_submodule(seq, n)
        elif name == 'SpatialCrossMapLRN':
            lrn = torch.legacy.nn.SpatialCrossMapLRN(m.size, m.alpha, m.beta,
                                                     m.k)
            n = Lambda(lambda x, lrn=lrn: lrn.forward(x))
            add_submodule(seq, n)
        elif name == 'Sequential':
            n = nn.Sequential()
            lua_recursive_model(m, n)
            add_submodule(seq, n)
        elif name == 'ConcatTable':  # output is list
            n = LambdaMap(lambda x: x)
            lua_recursive_model(m, n)
            add_submodule(seq, n)
        elif name == 'CAddTable':  # input is list
            n = LambdaReduce(lambda x, y: x + y)
            add_submodule(seq, n)
        elif name == 'Concat':
            dim = m.dimension
            n = LambdaReduce(lambda x, y, dim=dim: torch.cat((x, y), dim))
            lua_recursive_model(m, n)
            add_submodule(seq, n)
        elif name == 'TorchObject':
            print('Not Implement', name, real._typename)
        else:
            print('Not Implement', name)
Example #29
    def build_conv_block(self,
                         dim,
                         padding_type,
                         norm_layer,
                         use_dropout,
                         use_bias,
                         cated_stream2=False,
                         cal_att=False):
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        if cated_stream2:
            conv_block += [
                nn.Conv2d(dim * 2,
                          dim * 2,
                          kernel_size=3,
                          padding=p,
                          bias=use_bias),
                norm_layer(dim * 2),
                nn.ReLU(True)
            ]
        else:
            conv_block += [
                nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                norm_layer(dim),
                nn.ReLU(True)
            ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        if cal_att:
            if cated_stream2:
                conv_block += [
                    nn.Conv2d(dim * 2,
                              dim,
                              kernel_size=3,
                              padding=p,
                              bias=use_bias)
                ]
            else:
                conv_block += [
                    nn.Conv2d(dim,
                              dim,
                              kernel_size=3,
                              padding=p,
                              bias=use_bias)
                ]
        else:
            conv_block += [
                nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                norm_layer(dim)
            ]

        return nn.Sequential(*conv_block)
Example #30
 def _upconv(self, in_channels, out_channels):
     return nn.Sequential(
         nn.ReplicationPad2d(1),
         nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1),
         nn.BatchNorm2d(out_channels),
     )
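
For reference, a small self-contained check (not taken from any of the snippets above; the channel counts and input size are arbitrary) showing that this replication-pad-then-3x3-convolution pattern preserves the spatial size, behaving like padding=1 but replicating border values instead of zero-filling:

import torch
import torch.nn as nn

# ReplicationPad2d(1) followed by an unpadded 3x3 stride-1 convolution keeps
# H and W unchanged, like nn.Conv2d(..., padding=1) with replicated borders.
block = nn.Sequential(
    nn.ReplicationPad2d(1),
    nn.Conv2d(8, 16, kernel_size=3, stride=1),
    nn.BatchNorm2d(16),
)
x = torch.randn(2, 8, 32, 32)
assert block(x).shape == (2, 16, 32, 32)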