Code example #1
    def __init__(self, config_text, norm_nc, label_nc, nhidden=128):
        super(MobileSPADE, self).__init__()

        assert config_text.startswith('spade')
        # Parse a config string such as 'spadeinstance3x3' into the
        # parameter-free norm type and the convolution kernel size.
        parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
        param_free_norm_type = str(parsed.group(1))
        ks = int(parsed.group(2))

        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'syncbatch':
            self.param_free_norm = SynchronizedBatchNorm2d(norm_nc,
                                                           affine=False)
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError(
                '%s is not a recognized param-free norm type in SPADE' %
                param_free_norm_type)

        # nhidden is the dimension of the intermediate embedding space
        # (128 by default).

        pw = ks // 2
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
            nn.ReLU())
        self.mlp_gamma = SeparableConv2d(nhidden,
                                         norm_nc,
                                         kernel_size=ks,
                                         padding=pw)
        self.mlp_beta = SeparableConv2d(nhidden,
                                        norm_nc,
                                        kernel_size=ks,
                                        padding=pw)
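
For reference, the modulation these layers set up is applied in SPADE's forward pass as normalized * (1 + gamma) + beta. A minimal forward sketch following the reference SPADE implementation (it assumes torch.nn.functional is imported as F, and a segmentation map segmap as the second input):

    def forward(self, x, segmap):
        # Normalize the activations without learned affine parameters.
        normalized = self.param_free_norm(x)
        # Resize the segmentation map to the activation resolution and
        # predict the spatially-varying modulation parameters from it.
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)
        # Scale and shift the normalized activations.
        return normalized * (1 + gamma) + beta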
Code example #2
    def build_conv_block(self, dim, padding_type, norm_layer, dropout_rate,
                         use_bias):
        conv_block = []
        # Use an explicit padding layer for 'reflect'/'replicate'; for
        # 'zeros', pad implicitly inside the convolution (p = 1).
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zeros':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        # First sub-block: separable convolution, normalization, ReLU.
        conv_block += [
            SeparableConv2d(in_channels=dim,
                            out_channels=dim,
                            kernel_size=3,
                            padding=p,
                            stride=1),
            norm_layer(dim),
            nn.ReLU(True)
        ]
        # Dropout between the two convolutions.
        conv_block += [nn.Dropout(dropout_rate)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zeros':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        # Second sub-block: separable convolution and normalization only;
        # the residual addition is expected in the block's forward().
        conv_block += [
            SeparableConv2d(in_channels=dim,
                            out_channels=dim,
                            kernel_size=3,
                            padding=p,
                            stride=1),
            norm_layer(dim)
        ]

        return nn.Sequential(*conv_block)
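
The snippets in this section all depend on a SeparableConv2d module whose definition is not shown. A minimal sketch of the depthwise-separable factorization it presumably wraps is below; the actual module in the source codebase may differ (for instance, by inserting a norm layer between the two convolutions):

import torch.nn as nn

class SeparableConv2d(nn.Module):
    # Depthwise-separable convolution: a depthwise conv (one filter per
    # input channel) followed by a 1x1 pointwise conv that mixes channels.
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=0, padding_mode='zeros', use_bias=True):
        super().__init__()
        self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size,
                                   stride=stride, padding=padding,
                                   padding_mode=padding_mode,
                                   groups=in_channels, bias=use_bias)
        self.pointwise = nn.Conv2d(in_channels, out_channels,
                                   kernel_size=1, bias=use_bias)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))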
Code example #3
    def __init__(self, channel, kernel, stride, padding_type):
        super(MobileResnetBlock_attn, self).__init__()
        self.channel = channel
        self.kernel = kernel
        self.stride = stride
        self.padding = 1
        # padding_type is forwarded as the convolution's padding_mode, so it
        # must be a mode nn.Conv2d accepts ('zeros', 'reflect', 'replicate',
        # 'circular').
        self.padding_type = padding_type
        self.conv1 = SeparableConv2d(channel,
                                     channel,
                                     kernel,
                                     stride,
                                     padding=self.padding,
                                     padding_mode=self.padding_type)
        self.conv1_norm = nn.InstanceNorm2d(channel)
        self.conv2 = SeparableConv2d(channel,
                                     channel,
                                     kernel,
                                     stride,
                                     padding=self.padding,
                                     padding_mode=self.padding_type)
        self.conv2_norm = nn.InstanceNorm2d(channel)
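
Only the constructor is shown; assuming the standard residual pattern the block's name suggests, its forward pass would look roughly like this (with torch.nn.functional imported as F):

    def forward(self, x):
        # Two separable convolutions with instance norm, ReLU after the
        # first, then the skip connection.
        out = F.relu(self.conv1_norm(self.conv1(x)))
        out = self.conv2_norm(self.conv2(out))
        return x + out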
Code example #4
    def __init__(self, input_dim, output_dim, kernel_size, stride,
                 padding=0, norm='none', activation='relu', pad_type='zero'):
        super(MobileConv2dBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            # Raise explicitly; assert statements can be stripped under -O.
            raise ValueError("Unsupported padding type: {}".format(pad_type))

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            raise NotImplementedError
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            raise ValueError("Unsupported normalization: {}".format(norm))

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            raise ValueError("Unsupported activation: {}".format(activation))

        # initialize convolution
        if norm == 'sn':
            raise NotImplementedError
        else:
            self.conv = SeparableConv2d(input_dim, output_dim, kernel_size,
                                        stride, use_bias=self.use_bias)
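
Here too only the constructor is shown; assuming the pad -> conv -> norm -> activation ordering the initializer implies, the forward pass would be roughly:

    def forward(self, x):
        # Explicit padding first, then the separable convolution.
        x = self.conv(self.pad(x))
        # Norm and activation are optional ('none' leaves them as None).
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        return x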
Code example #5

    def __init__(self, dim, n_feat, out_feat, init_type='normal',
                 init_gain=0.02, gpu_ids=[]):
        super(NBlock, self).__init__()
        self.out_feat = out_feat
        if out_feat < 32:  # small outputs: collapse to one channel, then a linear map
            self.conv2d = nn.Conv2d(dim, 1, kernel_size=1)
            self.lin = nn.Linear(n_feat, out_feat ** 2)
            n_block = [self.conv2d, nn.InstanceNorm2d(1), nn.Flatten(), self.lin]
            self.n_block = init_net(nn.Sequential(*n_block), init_type,
                                    init_gain, gpu_ids)
        else:
            # Note: in_channels is hardcoded to 256, so this branch assumes
            # dim == 256. nn.InstanceNorm2d(1) also does not match the 32
            # channels produced by the preceding conv; it runs because the
            # layer is non-affine, but recent PyTorch versions warn about it.
            self.n_block = [SeparableConv2d(in_channels=256, out_channels=32,
                                            kernel_size=3, stride=1, padding=1),
                            nn.InstanceNorm2d(1),
                            nn.ReLU(True)]
            self.n_block += [nn.Upsample((out_feat, out_feat))]
            self.n_block += [nn.Conv2d(in_channels=32, out_channels=1,
                                       kernel_size=1)]
            self.n_block += [nn.Flatten()]
            self.n_block = init_net(nn.Sequential(*self.n_block), init_type,
                                    init_gain, gpu_ids)
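
A hypothetical usage sketch for the large-output branch; the shapes and the init_net helper (a CycleGAN-style weight-initialization wrapper) are assumptions, not part of the snippet:

import torch

# Map a 256-channel 32x32 feature map to a flattened 64x64 map.
block = NBlock(dim=256, n_feat=256 * 32 * 32, out_feat=64)
x = torch.randn(2, 256, 32, 32)
out = block.n_block(x)  # shape: (2, 64 * 64)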