Example 1
 def _build_weights(self, dim_in, dim_out):
     self.conv1 = nn.Conv2D(dim_in, dim_in, 3, 1, 1)
     self.conv2 = nn.Conv2D(dim_in, dim_out, 3, 1, 1)
     if self.normalize:
         self.norm1 = fluid.dygraph.InstanceNorm(
             dim_in,
             epsilon=1e-05,
             param_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.Constant(1.0),
                 trainable=False),
             bias_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.Constant(0.0),
                 trainable=False),
             dtype='float32')  # affine=False, realized by the two frozen ParamAttr settings above
         self.norm2 = fluid.dygraph.InstanceNorm(
             dim_in,
             epsilon=1e-05,
             param_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.Constant(1.0),
                 trainable=False),
             bias_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.Constant(0.0),
                 trainable=False),
             dtype='float32')  # affine=False, realized by the two frozen ParamAttr settings above
     if self.learned_sc:
         self.conv1x1 = nn.Conv2D(dim_in, dim_out, 1, 1, 0, bias_attr=False)
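The two frozen ParamAttr settings above pin scale to 1.0 and shift to 0.0 without gradients, which reproduces PyTorch's InstanceNorm2d(affine=False) in the fluid API. As a minimal sketch, the same effect in the Paddle 2.x API (an assumption of this note, not part of the original snippet) would be:

# Sketch only: Paddle 2.x equivalent of the frozen-parameter pattern above.
# weight_attr=False / bias_attr=False skip creating the affine parameters,
# matching InstanceNorm2d(affine=False).
import paddle

norm = paddle.nn.InstanceNorm2D(64, epsilon=1e-05,
                                weight_attr=False, bias_attr=False)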
Example 2
    def __init__(self,
                 img_size=256,
                 style_dim=64,
                 num_domains=2,
                 max_conv_dim=512):
        super().__init__()
        self.num_domains = num_domains
        dim_in = 2**14 // img_size
        blocks = []
        blocks += [nn.Conv2D(3, dim_in, 3, 1, 1)]

        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out

        blocks += [LeakyRelu(alpha=0.2)]
        blocks += [nn.Conv2D(dim_out, dim_out, 4, 1, 0)]
        blocks += [LeakyRelu(alpha=0.2)]
        self.shared = fluid.dygraph.Sequential(*blocks)

        self.unshared = fluid.dygraph.Sequential()
        for _ in range(num_domains):
            self.unshared.add_sublayer(f'lsub_{_}',
                                       nn.Linear(dim_out, style_dim))
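For the default img_size=256 this works out to dim_in = 2**14 // 256 = 64 and repeat_num = 6, so the downsampling blocks shrink the 256x256 input to 4x4 and the 4x4 valid convolution that follows collapses it to 1x1 before the per-domain Linear heads. A plain-Python sketch of the channel progression (illustration only, not part of the snippet):

# Sketch: channel progression of the loop above for img_size=256.
import numpy as np

img_size, max_conv_dim = 256, 512
dim_in = 2 ** 14 // img_size                # 64
repeat_num = int(np.log2(img_size)) - 2     # 6 downsampling ResBlks
dims = []
for _ in range(repeat_num):
    dim_out = min(dim_in * 2, max_conv_dim)
    dims.append((dim_in, dim_out))
    dim_in = dim_out
print(dims)
# [(64, 128), (128, 256), (256, 512), (512, 512), (512, 512), (512, 512)]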
Example 3
 def _build_weights(self, dim_in, dim_out, style_dim=64):
     self.conv1 = nn.Conv2D(dim_in, dim_out, 3, 1, 1)
     self.conv2 = nn.Conv2D(dim_out, dim_out, 3, 1, 1)
     self.norm1 = AdaIN(style_dim, dim_in)
     self.norm2 = AdaIN(style_dim, dim_out)
     if self.learned_sc:
         self.conv1x1 = nn.Conv2D(dim_in, dim_out, 1, 1, 0, bias_attr=False)
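AdaIN is the repo's adaptive instance normalization layer, which this snippet only references. A minimal stand-in showing the standard formulation (hypothetical class written against the Paddle 2.x API; the repo's own implementation may differ):

# Hypothetical AdaIN sketch: a style code s is mapped to per-channel
# gamma/beta that re-modulate a parameter-free instance norm.
import paddle
import paddle.nn as pnn

class AdaINSketch(pnn.Layer):
    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = pnn.InstanceNorm2D(num_features,
                                       weight_attr=False, bias_attr=False)
        self.fc = pnn.Linear(style_dim, num_features * 2)

    def forward(self, x, s):
        h = self.fc(s)                                   # [N, 2C]
        h = paddle.reshape(h, [h.shape[0], h.shape[1], 1, 1])
        gamma, beta = paddle.chunk(h, 2, axis=1)         # [N, C, 1, 1] each
        return (1 + gamma) * self.norm(x) + beta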
Example 4
    def __init__(self):
        super(ModelConv, self).__init__()
        with supernet(kernel_size=(3, 5, 7),
                      channel=((4, 8, 12), (8, 12, 16), (8, 12, 16),
                               (8, 12, 16))) as ofa_super:
            models = []
            models += [nn.Conv2D(3, 4, 3, padding=1)]
            models += [nn.InstanceNorm(4)]
            models += [ReLU()]
            models += [nn.Conv2D(4, 4, 3, groups=4)]
            models += [nn.InstanceNorm(4)]
            models += [ReLU()]
            models += [
                nn.Conv2DTranspose(4,
                                   4,
                                   3,
                                   groups=4,
                                   padding=1,
                                   use_cudnn=True)
            ]
            models += [nn.BatchNorm(4)]
            models += [ReLU()]
            models += [nn.Conv2D(4, 3, 3)]
            models += [ReLU()]
            models = ofa_super.convert(models)

        models += [
            Block(SuperSeparableConv2D(3,
                                       6,
                                       1,
                                       padding=1,
                                       candidate_config={'channel': (3, 6)}),
                  fixed=True)
        ]
        with supernet(kernel_size=(3, 5, 7),
                      expand_ratio=(1, 2, 4)) as ofa_super:
            models1 = []
            models1 += [nn.Conv2D(6, 4, 3)]
            models1 += [nn.BatchNorm(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2D(4, 4, 3, groups=2)]
            models1 += [nn.InstanceNorm(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3, groups=2)]
            models1 += [nn.BatchNorm(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3)]
            models1 += [nn.BatchNorm(4)]
            models1 += [ReLU()]
            models1 = ofa_super.convert(models1)

        models += models1

        self.models = paddle.nn.Sequential(*models)
Example 5
 def __init__(self, in_planes, out_planes, stride=1):
     super(Block, self).__init__()
     self.conv1 = nn.Conv2D(in_planes,
                            in_planes,
                            filter_size=3,
                            stride=stride,
                            padding=1,
                            groups=in_planes,
                            bias_attr=False)
     self.bn1 = nn.InstanceNorm(in_planes)
     self.conv2 = nn.Conv2D(in_planes,
                            out_planes,
                            filter_size=1,
                            stride=1,
                            padding=0)
     self.bn2 = nn.InstanceNorm(out_planes)
Example 6
 def __init__(self):
     super(MobileNet, self).__init__()
     self.conv1 = nn.Conv2D(shape[0],
                            shape[0],
                            filter_size=3,
                            stride=1,
                            padding=1,
                            bias_attr=False)
     self.bn1 = nn.InstanceNorm(shape[0])
     self._layers = self._make_layers(in_planes=shape[0])
     self.conv_last = nn.Conv2D(shape[0],
                                shape[0],
                                filter_size=3,
                                stride=1,
                                padding=1,
                                bias_attr=False)
Example 7
    def __init__(self,
                 in_channels,
                 out_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bn_init_constant=1.0):
        super(ConvBNLayer, self).__init__()

        self.conv = nn.Conv2D(num_channels=in_channels,
                              filter_size=filter_size,
                              num_filters=out_channels,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=groups,
                              param_attr=weight_init(),
                              bias_attr=False)
        self.bn = nn.BatchNorm(out_channels,
                               param_attr=norm_weight_init(bn_init_constant),
                               bias_attr=norm_bias_init(),
                               act=None,
                               momentum=0.1,
                               use_global_stats=True)
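Here use_global_stats=True makes the BatchNorm normalize with its accumulated global mean and variance even during training, so the layer behaves as frozen. A hedged usage sketch (the weight_init/norm_weight_init/norm_bias_init helpers are the repo's own and assumed in scope):

# Hypothetical usage of ConvBNLayer; shape math: (224 + 2*3 - 7)//2 + 1 = 112.
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    layer = ConvBNLayer(in_channels=3, out_channels=64, filter_size=7,
                        stride=2, padding=3)
    x = fluid.dygraph.to_variable(
        np.random.randn(1, 3, 224, 224).astype('float32'))
    y = layer.conv(x)   # [1, 64, 112, 112]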
Example 8
    def __init__(self, img_size=256, num_domains=2, max_conv_dim=512):
        super().__init__()
        dim_in = 2**14 // img_size
        blocks = []
        blocks += [nn.Conv2D(3, dim_in, 3, 1, 1)]

        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out

        blocks += [LeakyRelu(alpha=0.2)]
        blocks += [nn.Conv2D(dim_out, dim_out, 4, 1, 0)]
        blocks += [LeakyRelu(alpha=0.2)]
        blocks += [nn.Conv2D(dim_out, num_domains, 1, 1, 0)]
        self.main = fluid.dygraph.Sequential(*blocks)
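The final 1x1 convolution emits one logit per domain at a single spatial position, so the output is [N, num_domains, 1, 1] and the logit for each sample's target domain y is picked by indexing. A hedged sketch of that selection (Paddle 2.x API; the discriminator's forward is not shown in the snippet):

# Sketch: pick the per-domain logit from a [N, num_domains, 1, 1] output.
import paddle

out = paddle.randn([8, 2, 1, 1])                 # stand-in for main(x)
out = paddle.reshape(out, [out.shape[0], -1])    # [N, num_domains]
y = paddle.randint(0, 2, [8])                    # target domain per sample
idx = paddle.stack([paddle.arange(8), y], axis=1)
logits = paddle.gather_nd(out, idx)              # [N] logits for domain y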
Example 9
 def __init__(self):
     super(Model, self).__init__()
     with supernet(kernel_size=(3, 5, 7), expand_ratio=[1, 2,
                                                        4]) as ofa_super:
         models = []
         models += [nn.Conv2D(1, 6, 3)]
         models += [ReLU()]
         models += [nn.Pool2D(2, 'max', 2)]
         models += [nn.Conv2D(6, 16, 5, padding=0)]
         models += [ReLU()]
         models += [nn.Pool2D(2, 'max', 2)]
         models += [
             nn.Linear(784, 120),
             nn.Linear(120, 84),
             nn.Linear(84, 10)
         ]
         models = ofa_super.convert(models)
     self.models = paddle.nn.Sequential(*models)
Example 10
 def __init__(self, num_channels, num_filters, is_test=False):
     super(AdjustLayer, self).__init__()
     self.conv = nn.Conv2D(num_channels=num_channels,
                           num_filters=num_filters,
                           filter_size=1,
                           param_attr=weight_init(),
                           bias_attr=False)
     self.bn = nn.BatchNorm(num_channels=num_filters,
                            param_attr=norm_weight_init(),
                            bias_attr=norm_bias_init(),
                            momentum=0.9,
                            act=None,
                            use_global_stats=is_test)
Example 11
    def __init__(self, name_scope, num_classes=1):
        super(LeNet, self).__init__(name_scope)

        # Conv and pooling blocks: each conv layer uses a Sigmoid activation followed by 2x2 max pooling
        self.conv1 = nn.Conv2D(num_channels=1,
                               num_filters=6,
                               filter_size=5,
                               act='sigmoid')
        self.pool1 = nn.Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = nn.Conv2D(num_channels=6,
                               num_filters=16,
                               filter_size=5,
                               act='sigmoid')
        self.pool2 = nn.Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # The third conv layer
        self.conv3 = nn.Conv2D(num_channels=16,
                               num_filters=120,
                               filter_size=4,
                               act='sigmoid')
        # Fully connected layers: the first outputs 64 neurons, the second outputs one neuron per label class (num_classes)
        self.fc1 = nn.Linear(input_dim=120, output_dim=64, act='sigmoid')
        self.fc2 = nn.Linear(input_dim=64, output_dim=num_classes)
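With a 28x28 input these layers chain up exactly: conv1 (5x5) gives 24x24, pool1 gives 12x12, conv2 gives 8x8, pool2 gives 4x4, and conv3 (4x4) gives 1x1 with 120 channels, matching fc1's input_dim=120. A hedged sketch of the forward pass they imply (the snippet does not include one; act='sigmoid' already applies the activation inside each layer):

    def forward(self, x):                        # x: [N, 1, 28, 28]
        x = self.pool1(self.conv1(x))            # [N, 6, 12, 12]
        x = self.pool2(self.conv2(x))            # [N, 16, 4, 4]
        x = self.conv3(x)                        # [N, 120, 1, 1]
        x = fluid.layers.reshape(x, [x.shape[0], -1])   # [N, 120]
        x = self.fc1(x)                          # [N, 64]
        return self.fc2(x)                       # [N, num_classes]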
Example 12
    def __init__(self, name):
        super().__init__(name)
        init_w = fluid.ParamAttr(
            name="a_weight",
            initializer=fluid.initializer.ConstantInitializer(0.001),
            learning_rate=0.,
            trainable=False)
        init_b = fluid.ParamAttr(
            name="a_bias",
            initializer=fluid.initializer.ConstantInitializer(0.),
            trainable=True)

        self.adjust_conv = nn.Conv2D(1,
                                     1,
                                     1,
                                     1,
                                     0,
                                     param_attr=init_w,
                                     bias_attr=init_b)
Example 13
    def __init__(self,
                 in_channels,
                 out_channels,
                 filter_size,
                 stride=1,
                 groups=1,
                 padding=1,
                 is_test=False):
        super(ConvBNReluLayer, self).__init__()

        self.conv = nn.Conv2D(num_channels=in_channels,
                              filter_size=filter_size,
                              num_filters=out_channels,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              bias_attr=bias_init(),
                              param_attr=weight_init())
        self.bn = nn.BatchNorm(out_channels,
                               param_attr=norm_weight_init(),
                               bias_attr=norm_bias_init(),
                               act=None,
                               momentum=0.9,
                               use_global_stats=is_test)
Example 14
 def __init__(self, num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1):
     super(Conv1d, self).__init__()
     self.conv2d = nn.Conv2D(num_channels, num_filters, (filter_size, 1),
                             stride=stride, padding=[padding, 0], dilation=[dilation, 1])
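The wrapper slides a (filter_size, 1) kernel, so a [N, C, L] input needs a dummy trailing axis before the Conv2D and a squeeze afterwards. A hedged sketch of the implied forward (not part of the snippet):

 def forward(self, x):                          # x: [N, C, L]
     x = fluid.layers.unsqueeze(x, axes=[3])    # [N, C, L, 1]
     x = self.conv2d(x)                         # [N, C_out, L_out, 1]
     return fluid.layers.squeeze(x, axes=[3])   # [N, C_out, L_out]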
Example 15
    def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
        super().__init__()
        dim_in = 2**14 // img_size  # dim_in: 64
        self.img_size = img_size
        self.from_rgb = nn.Conv2D(3, dim_in, 3, 1, 1)
        self.encode = fluid.dygraph.Sequential()
        self.decode = fluid.dygraph.Sequential()
        self.to_rgb = fluid.dygraph.Sequential(
            fluid.dygraph.InstanceNorm(
                dim_in,
                epsilon=1e-05,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(1.0),
                    trainable=False),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0),
                    trainable=False),
                dtype='float32'),  # affine=False, realized by the two frozen ParamAttr settings above
            # functools.partial(fluid.layers.leaky_relu, alpha=0.2),
            LeakyRelu(alpha=0.2),
            nn.Conv2D(dim_in, 3, 1, 1, 0))
        self.w_hpf = w_hpf

        # down/up-sampling blocks
        repeat_num = int(np.log2(img_size)) - 4  # repeat_num: 4
        self.repeat_num = repeat_num
        if w_hpf > 0:
            repeat_num += 1
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            self.encode.add_sublayer(
                f'lsample_{_}',
                ResBlk(dim_in, dim_out, normalize=True, downsample=True))
            self.decode.add_sublayer(
                f'lsample_{_}',
                AdainResBlk(dim_out,
                            dim_in,
                            style_dim,
                            w_hpf=w_hpf,
                            upsample=True))
            dim_in = dim_out

        # bottleneck blocks
        for _ in range(2):
            self.encode.add_sublayer(f'lbnk_{_}',
                                     ResBlk(dim_out, dim_out, normalize=True))
            self.decode.add_sublayer(
                f'lbnk_{_}',
                AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))

        self.decode1 = fluid.dygraph.Sequential()
        for _ in list(range(2))[::-1]:
            layer = self.decode[f'lbnk_{_}']
            self.decode1.add_sublayer(f'lbnk_{_}', layer)
        for _ in list(range(repeat_num))[::-1]:
            layer = self.decode[f'lsample_{_}']
            self.decode1.add_sublayer(f'lsample_{_}', layer)
        self.decode = self.decode1  # stack-like

        if w_hpf > 0:
            self.hpf = HighPass(w_hpf)
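The decode1 loop re-registers the decoder blocks so that traversal order is the reverse of registration order: decoding starts at the bottleneck and ends at the finest resolution, mirroring the encoder (the "stack-like" comment above). A plain-Python sketch of that re-ordering (repeat_num = 5 here because w_hpf > 0 adds one block at img_size=256):

# Sketch: registration order vs. traversal order after the reversal above.
repeat_num = 5
encode_order = [f'lsample_{i}' for i in range(repeat_num)] \
             + [f'lbnk_{i}' for i in range(2)]
decode_order = [f'lbnk_{i}' for i in reversed(range(2))] \
             + [f'lsample_{i}' for i in reversed(range(repeat_num))]
print(decode_order)
# ['lbnk_1', 'lbnk_0', 'lsample_4', 'lsample_3', 'lsample_2',
#  'lsample_1', 'lsample_0']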
Example 16
 def layer_init(self):
     # for conv1
     self.conv1 = nn.Conv2D(num_channels=3,
                            num_filters=96,
                            filter_size=11,
                            stride=2,
                            padding=0,
                            groups=1,
                            param_attr=self.weight_init(),
                            bias_attr=self.bias_init())
     self.bn1 = nn.BatchNorm(num_channels=96,
                             is_test=self.is_test,
                             param_attr=self.norm_weight_init(),
                             bias_attr=self.bias_init(),
                             use_global_stats=self.is_test)
     self.pool1 = nn.Pool2D(pool_size=3,
                            pool_type="max",
                            pool_stride=2,
                            pool_padding=0)
     # for conv2
     self.conv2 = nn.Conv2D(num_channels=96,
                            num_filters=256,
                            filter_size=5,
                            stride=1,
                            padding=0,
                            groups=2,
                            param_attr=self.weight_init(),
                            bias_attr=self.bias_init())
     self.bn2 = nn.BatchNorm(num_channels=256,
                             is_test=self.is_test,
                             param_attr=self.norm_weight_init(),
                             bias_attr=self.bias_init(),
                             use_global_stats=self.is_test)
     self.pool2 = nn.Pool2D(pool_size=3,
                            pool_type="max",
                            pool_stride=2,
                            pool_padding=0)
     # for conv3
     self.conv3 = nn.Conv2D(num_channels=256,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=0,
                            groups=1,
                            param_attr=self.weight_init(),
                            bias_attr=self.bias_init())
     self.bn3 = nn.BatchNorm(num_channels=384,
                             is_test=self.is_test,
                             param_attr=self.norm_weight_init(),
                             bias_attr=self.bias_init(),
                             use_global_stats=self.is_test)
     # for conv4
     self.conv4 = nn.Conv2D(num_channels=384,
                            num_filters=384,
                            filter_size=3,
                            stride=1,
                            padding=0,
                            groups=2,
                            param_attr=self.weight_init(),
                            bias_attr=self.bias_init())
     self.bn4 = nn.BatchNorm(num_channels=384,
                             is_test=self.is_test,
                             param_attr=self.norm_weight_init(),
                             bias_attr=self.bias_init(),
                             use_global_stats=self.is_test)
     # for conv5
     self.conv5 = nn.Conv2D(num_channels=384,
                            num_filters=256,
                            filter_size=3,
                            stride=1,
                            padding=0,
                            groups=2,
                            param_attr=self.weight_init(),
                            bias_attr=self.bias_init())