    def __init__(self, input_channel, output_nc, config, norm_layer=InstanceNorm, dropout_rate=0, n_blocks=9, padding_type='reflect'):
        super(SubMobileResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == InstanceNorm
        else:
            use_bias = norm_layer == InstanceNorm

        self.model = fluid.dygraph.LayerList([Pad2D(paddings=[3, 3, 3, 3], mode="reflect"),
                           Conv2D(input_channel, config['channels'][0], filter_size=7, padding=0, use_cudnn=use_cudnn, bias_attr=use_bias),
                           norm_layer(config['channels'][0]),
                           ReLU()])

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2 ** i
            in_c = config['channels'][i]
            out_c = config['channels'][i + 1]
            self.model.extend([Conv2D(in_c * mult, out_c * mult * 2, filter_size=3, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
                               norm_layer(out_c * mult * 2),
                               ReLU()])

        mult = 2 ** n_downsampling

        in_c = config['channels'][2]
        for i in range(n_blocks):
            if len(config['channels']) == 6:
                offset = 0
            else:
                offset = i // 3
            out_c = config['channels'][offset + 3]
            self.model.extend([MobileResnetBlock(in_c * mult, out_c * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        if len(config['channels']) == 6:
            offset = 4
        else:
            offset = 6
        for i in range(n_downsampling):
            out_c = config['channels'][offset + i]
            mult = 2 ** (n_downsampling - i)
            output_size = (i + 1) * 128
            self.model.extend([Conv2DTranspose(in_c * mult, int(out_c * mult / 2), filter_size=3, output_size=output_size, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
#            self.model.extend([Conv2DTranspose(in_c * mult, int(out_c * mult / 2), filter_size=3, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
#                               Pad2D(paddings=[0, 1, 0, 1], mode='constant', pad_value=0.0),
                               norm_layer(int(out_c * mult / 2)),
                               ReLU()])
            in_c = out_c

        self.model.extend([Pad2D(paddings=[3, 3, 3, 3], mode="reflect")])
        self.model.extend([Conv2D(in_c, output_nc, filter_size=7, padding=0)])
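
    # Hypothetical forward (not shown in this snippet): assuming the LayerList
    # built above is meant to be applied in order, a minimal sketch would be:
    def forward(self, inputs):
        y = inputs
        for sublayer in self.model:
            y = sublayer(y)
        return y
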
Example #2
    def __init__(self, dim, padding_type, norm_layer, dropout_rate, use_bias=False):
        super(ResnetBlock, self).__init__()

        self.conv_block = fluid.dygraph.LayerList([])
        p = 0
        if padding_type == 'reflect':
            self.conv_block.extend([Pad2D(paddings=[1,1,1,1], mode='reflect')])
        elif padding_type == 'replicate':
            self.conv_block.extend([Pad2D(paddings=[1,1,1,1], mode='edge')])
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        self.conv_block.extend([Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias),
                                norm_layer(dim),
                                ReLU()])
        self.conv_block.extend([Dropout(dropout_rate)])

        p = 0
        if padding_type == 'reflect':
            self.conv_block.extend([Pad2D(paddings=[1,1,1,1], mode='reflect')])
        elif padding_type == 'replicate':
            self.conv_block.extend([Pad2D(paddings=[1,1,1,1], mode='edge')])
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        self.conv_block.extend([Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias), norm_layer(dim)])
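
    # Hypothetical forward (not part of this snippet): a ResNet block normally
    # adds the input back as a skip connection around the conv stack, e.g.:
    def forward(self, inputs):
        y = inputs
        for sublayer in self.conv_block:
            y = sublayer(y)
        return inputs + y
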
Example #3
    def __init__(self, in_c, out_c, padding_type, norm_layer, dropout_rate, use_bias):
        super(MobileResnetBlock, self).__init__()
        self.padding_type = padding_type
        self.dropout_rate = dropout_rate
        self.conv_block = fluid.dygraph.LayerList([])

        p = 0
        if self.padding_type == 'reflect':
            self.conv_block.extend([Pad2D(paddings=[1,1,1,1], mode='reflect')])
        elif self.padding_type == 'replicate':
            self.conv_block.extend([Pad2D(paddings=[1,1,1,1], mode='edge')])
        elif self.padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % self.padding_type)

        self.conv_block.extend([SeparableConv2D(num_channels=in_c, num_filters=out_c, filter_size=3, padding=p, stride=1), 
                                norm_layer(out_c),
                                ReLU()])

        self.conv_block.extend([Dropout(p=self.dropout_rate)])

        p = 0
        if self.padding_type == 'reflect':
            self.conv_block.extend([Pad2D(paddings=[1,1,1,1], mode='reflect')])
        elif self.padding_type == 'replicate':
            self.conv_block.extend([Pad2D(paddings=[1,1,1,1], mode='edge')])
        elif self.padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % self.padding_type)

        self.conv_block.extend([SeparableConv2D(num_channels=out_c, num_filters=in_c, filter_size=3, padding=p, stride=1), 
                                norm_layer(in_c)])
Example #4
    def __init__(self,
                 num_channels,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding=0,
                 norm=True,
                 norm_layer=InstanceNorm,
                 relu=True,
                 relufactor=0.0,
                 use_bias=False):
        super(conv2d, self).__init__()

        if not use_bias:
            con_bias_attr = False
        else:
            con_bias_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0))

        self.conv = Conv2D(num_channels=num_channels,
                           num_filters=int(num_filters),
                           filter_size=int(filter_size),
                           stride=stride,
                           padding=padding,
                           use_cudnn=use_cudnn,
                           param_attr=fluid.ParamAttr(
                               initializer=fluid.initializer.NormalInitializer(
                                   loc=0.0, scale=stddev)),
                           bias_attr=con_bias_attr)
        if norm_layer == InstanceNorm:
            self.bn = InstanceNorm(
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(1.0),
                    trainable=False),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0),
                    trainable=False),
            )
        elif norm_layer == BatchNorm:
            self.bn = BatchNorm(
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(initializer=fluid.initializer.
                                           NormalInitializer(1.0, 0.02)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0)),
            )
        else:
            raise NotImplementedError

        self.relufactor = relufactor
        self.use_bias = use_bias
        self.norm = norm
        if relu:
            if relufactor == 0.0:
                self.lrelu = ReLU()
            else:
                self.lrelu = Leaky_ReLU(self.relufactor)
        self.relu = relu
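
    # Hypothetical forward (not shown in this snippet): conv, then the optional
    # norm and activation selected by the flags stored above, e.g.:
    def forward(self, inputs):
        y = self.conv(inputs)
        if self.norm:
            y = self.bn(y)
        if self.relu:
            y = self.lrelu(y)
        return y
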
    def __init__(self, num_classes=10):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.features = Sequential(
            Conv2D(
                in_channels=1,
                out_channels=6,
                kernel_size=3,
                stride=1,
                padding=1,
                weight_attr=conv2d_w1_attr,
                bias_attr=False),
            BatchNorm2D(6),
            ReLU(),
            MaxPool2D(
                kernel_size=2, stride=2),
            Conv2D(
                in_channels=6,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=0,
                weight_attr=conv2d_w2_attr,
                bias_attr=conv2d_b2_attr),
            BatchNorm2D(16),
            PReLU(),
            MaxPool2D(
                kernel_size=2, stride=2))

        self.fc = Sequential(
            Linear(
                in_features=400,
                out_features=120,
                weight_attr=fc_w1_attr,
                bias_attr=fc_b1_attr),
            LeakyReLU(),
            Linear(
                in_features=120,
                out_features=84,
                weight_attr=fc_w2_attr,
                bias_attr=fc_b2_attr),
            Sigmoid(),
            Linear(
                in_features=84,
                out_features=num_classes,
                weight_attr=fc_w3_attr,
                bias_attr=fc_b3_attr),
            Softmax())
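
    # Hypothetical forward (not shown in this snippet): the 400-unit first Linear
    # matches a flattened 16x5x5 feature map (for 28x28 inputs), so a minimal
    # sketch, assuming `import paddle` at module level, would be:
    def forward(self, inputs):
        x = self.features(inputs)
        x = paddle.flatten(x, start_axis=1)
        return self.fc(x)
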
Example #6
    def __init__(self, input_channel, output_nc, ngf, norm_layer=InstanceNorm, dropout_rate=0, n_blocks=9, padding_type='reflect'):
        super(MobileResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == InstanceNorm
        else:
            use_bias = norm_layer == InstanceNorm

        self.model = fluid.dygraph.LayerList([Pad2D(paddings=[3, 3, 3, 3], mode="reflect"),
                           Conv2D(input_channel, int(ngf), filter_size=7, padding=0, use_cudnn=use_cudnn, bias_attr=use_bias),
                           norm_layer(ngf),
                           ReLU()])

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2 ** i
            self.model.extend([Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
                               norm_layer(ngf * mult * 2),
                               ReLU()])

        mult = 2 ** n_downsampling

        n_blocks1 = n_blocks // 3
        n_blocks2 = n_blocks1
        n_blocks3 = n_blocks - n_blocks1 - n_blocks2

        for i in range(n_blocks1):
            self.model.extend([MobileResnetBlock(ngf * mult, ngf * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        for i in range(n_blocks2):
            self.model.extend([MobileResnetBlock(ngf * mult, ngf * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        for i in range(n_blocks3):
            self.model.extend([MobileResnetBlock(ngf * mult, ngf * mult, padding_type=padding_type, norm_layer=norm_layer, dropout_rate=dropout_rate, use_bias=use_bias)])

        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            output_size = (i + 1) * 128
            self.model.extend([Conv2DTranspose(ngf * mult, int(ngf * mult / 2), filter_size=3, output_size=output_size, stride=2, padding=1, use_cudnn=use_cudnn, bias_attr=use_bias),
                               norm_layer(int(ngf * mult / 2)),
                               ReLU()])

        self.model.extend([Pad2D(paddings=[3, 3, 3, 3], mode="reflect")])
        self.model.extend([Conv2D(ngf, output_nc, filter_size=7, padding=0)])
    def __init__(self, dim, padding_type, norm_layer, dropout_rate, use_bias):
        super(SuperMobileResnetBlock, self).__init__()
        self.conv_block = fluid.dygraph.LayerList([])
        p = 0
        if padding_type == 'reflect':
            self.conv_block.extend(
                [Pad2D(paddings=[1, 1, 1, 1], mode="reflect")])
        elif padding_type == 'replicate':
            self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode="edge")])
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        self.conv_block.extend([
            SuperSeparableConv2D(num_channels=dim,
                                 num_filters=dim,
                                 filter_size=3,
                                 stride=1,
                                 padding=p),
            norm_layer(dim),
            ReLU()
        ])
        self.conv_block.extend([Dropout(dropout_rate)])

        p = 0
        if padding_type == 'reflect':
            self.conv_block.extend(
                [Pad2D(paddings=[1, 1, 1, 1], mode="reflect")])
        elif padding_type == 'replicate':
            self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode="edge")])
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        self.conv_block.extend([
            SuperSeparableConv2D(num_channels=dim,
                                 num_filters=dim,
                                 filter_size=3,
                                 stride=1,
                                 padding=p),
            norm_layer(dim)
        ])
Example #8
    def __init__(self,
                 num_channels,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding=[0, 0],
                 outpadding=[0, 0, 0, 0],
                 relu=True,
                 norm=True,
                 norm_layer=InstanceNorm,
                 relufactor=0.0,
                 use_bias=False):
        super(DeConv2D, self).__init__()

        if not use_bias:
            de_bias_attr = False
        else:
            de_bias_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0))

        self._deconv = Conv2DTranspose(
            num_channels,
            num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NormalInitializer(loc=0.0,
                                                                scale=stddev)),
            bias_attr=de_bias_attr)

        self.pad = Pad2D(paddings=outpadding, mode='constant', pad_value=0.0)
        if norm_layer == InstanceNorm:
            self.bn = InstanceNorm(
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(1.0),
                    trainable=False),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0),
                    trainable=False),
            )
        elif norm_layer == BatchNorm:
            self.bn = BatchNorm(
                num_channels=num_filters,
                param_attr=fluid.ParamAttr(initializer=fluid.initializer.
                                           NormalInitializer(1.0, 0.02)),
                bias_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(0.0)),
            )
        else:
            raise NotImplementedError

        self.outpadding = outpadding
        self.relufactor = relufactor
        self.use_bias = use_bias
        self.norm = norm
        self.relu = relu
        if relu:
            if relufactor == 0.0:
                self.lrelu = ReLU()
            else:
                self.lrelu = Leaky_ReLU(self.relufactor)
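
    # Hypothetical forward (not shown in this snippet): deconv, explicit output
    # padding, then the optional norm and activation chosen by the flags, e.g.:
    def forward(self, inputs):
        y = self._deconv(inputs)
        y = self.pad(y)
        if self.norm:
            y = self.bn(y)
        if self.relu:
            y = self.lrelu(y)
        return y
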
Example #9
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(ImperativeLenet, self).__init__()
        conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
        conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
        fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
        fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
        fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
        conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
        conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
        fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
        fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
        fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
        self.conv2d_0 = Conv2D(
            in_channels=1,
            out_channels=6,
            kernel_size=3,
            stride=1,
            padding=1,
            weight_attr=conv2d_w1_attr,
            bias_attr=conv2d_b1_attr)
        self.conv2d_0.skip_quant = True

        self.batch_norm_0 = BatchNorm(6)
        self.relu_0 = ReLU()
        self.pool2d_0 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.conv2d_1 = Conv2D(
            in_channels=6,
            out_channels=16,
            kernel_size=5,
            stride=1,
            padding=0,
            weight_attr=conv2d_w2_attr,
            bias_attr=conv2d_b2_attr)
        self.conv2d_1.skip_quant = False

        self.batch_norm_1 = BatchNorm(16)
        self.relu6_0 = ReLU6()
        self.pool2d_1 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
        self.linear_0 = Linear(
            in_features=400,
            out_features=120,
            weight_attr=fc_w1_attr,
            bias_attr=fc_b1_attr)
        self.linear_0.skip_quant = True

        self.leaky_relu_0 = LeakyReLU()
        self.linear_1 = Linear(
            in_features=120,
            out_features=84,
            weight_attr=fc_w2_attr,
            bias_attr=fc_b2_attr)
        self.linear_1.skip_quant = False

        self.sigmoid_0 = Sigmoid()
        self.linear_2 = Linear(
            in_features=84,
            out_features=num_classes,
            weight_attr=fc_w3_attr,
            bias_attr=fc_b3_attr)
        self.linear_2.skip_quant = False
        self.softmax_0 = Softmax()
    def __init__(self,
                 input_channel,
                 output_nc,
                 ngf,
                 norm_layer=InstanceNorm,
                 dropout_rate=0,
                 n_blocks=6,
                 padding_type='reflect'):
        assert n_blocks >= 0
        super(SuperMobileResnetGenerator, self).__init__()
        use_bias = norm_layer == InstanceNorm

        if norm_layer == InstanceNorm or (type(norm_layer) == functools.partial
                                          and norm_layer.func == InstanceNorm):
            norm_layer = SuperInstanceNorm
        else:
            raise NotImplementedError

        self.model = fluid.dygraph.LayerList([])
        self.model.extend([
            Pad2D(paddings=[3, 3, 3, 3], mode="reflect"),
            SuperConv2D(input_channel,
                        ngf,
                        filter_size=7,
                        padding=0,
                        bias_attr=use_bias),
            norm_layer(ngf),
            ReLU()
        ])

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            self.model.extend([
                SuperConv2D(ngf * mult,
                            ngf * mult * 2,
                            filter_size=3,
                            stride=2,
                            padding=1,
                            bias_attr=use_bias),
                norm_layer(int(ngf * mult * 2)),
                ReLU()
            ])

        mult = 2**n_downsampling
        n_blocks1 = n_blocks // 3
        n_blocks2 = n_blocks1
        n_blocks3 = n_blocks - n_blocks1 - n_blocks2

        for i in range(n_blocks1):
            self.model.extend([
                SuperMobileResnetBlock(ngf * mult,
                                       padding_type=padding_type,
                                       norm_layer=norm_layer,
                                       dropout_rate=dropout_rate,
                                       use_bias=use_bias)
            ])

        for i in range(n_blocks2):
            self.model.extend([
                SuperMobileResnetBlock(ngf * mult,
                                       padding_type=padding_type,
                                       norm_layer=norm_layer,
                                       dropout_rate=dropout_rate,
                                       use_bias=use_bias)
            ])

        for i in range(n_blocks3):
            self.model.extend([
                SuperMobileResnetBlock(ngf * mult,
                                       padding_type=padding_type,
                                       norm_layer=norm_layer,
                                       dropout_rate=dropout_rate,
                                       use_bias=use_bias)
            ])

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            output_size = (i + 1) * 128
            # PyTorch output_padding=1 is emulated in Paddle via the deconv's explicit output_size (an alternative is deconv followed by a pad)
            self.model.extend([
                SuperConv2DTranspose(ngf * mult,
                                     int(ngf * mult / 2),
                                     filter_size=3,
                                     output_size=output_size,
                                     stride=2,
                                     padding=1,
                                     bias_attr=use_bias),
                norm_layer(int(ngf * mult / 2)),
                ReLU()
            ])

        self.model.extend([Pad2D(paddings=[3, 3, 3, 3], mode="reflect")])
        self.model.extend(
            [SuperConv2D(ngf, output_nc, filter_size=7, padding=0)])
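
# Hypothetical usage sketch (not part of the original snippets): with fluid and
# the classes above in scope, the super generator could be constructed under a
# dygraph guard; the hyperparameter values below are illustrative assumptions.
with fluid.dygraph.guard():
    sketch_net = SuperMobileResnetGenerator(input_channel=3, output_nc=3, ngf=48,
                                            norm_layer=InstanceNorm, n_blocks=9)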