예제 #1
0
 def __init__(self, in_size, out_size):
     """One-layer network: a single fully connected projection.

     Args:
         in_size: number of input features.
         out_size: number of output features.
     """
     super(SimpleNet, self).__init__()
     # Sole sublayer; kwargs spell out the fluid.dygraph.Linear signature.
     self._linear = Linear(input_dim=in_size, output_dim=out_size)
예제 #2
0
    def __init__(self, input_nc, ndf=64, n_layers=5):
        """Spectral-normalised PatchGAN-style discriminator with a CAM head.

        Args:
            input_nc: number of input image channels.
            ndf: base number of discriminator filters.
            n_layers: total number of conv stages.
        """
        super(Discriminator, self).__init__()

        # Stem: reflection pad -> strided 4x4 conv -> LeakyReLU.
        layers = [
            ReflectionPad2d(pad=1),
            Spectralnorm(
                Conv2D(num_channels=input_nc,
                       num_filters=ndf,
                       filter_size=4,
                       stride=2,
                       padding=0,
                       bias_attr=True)),
            LeakyReLU(alpha=0.2, inplace=True)
        ]

        # Strided stages: channel count doubles at every step.
        for stage in range(1, n_layers - 2):
            width = 2**(stage - 1)
            layers.extend([
                ReflectionPad2d(pad=1),
                Spectralnorm(
                    Conv2D(num_channels=ndf * width,
                           num_filters=ndf * width * 2,
                           filter_size=4,
                           stride=2,
                           padding=0,
                           bias_attr=True)),
                LeakyReLU(alpha=0.2, inplace=True)
            ])

        # Last doubling stage runs at stride 1 (no further downsampling).
        width = 2**(n_layers - 2 - 1)
        layers.extend([
            ReflectionPad2d(pad=1),
            Spectralnorm(
                Conv2D(num_channels=ndf * width,
                       num_filters=ndf * width * 2,
                       filter_size=4,
                       stride=1,
                       padding=0,
                       bias_attr=True)),
            LeakyReLU(alpha=0.2, inplace=True)
        ])

        # Class Activation Map: global-avg / global-max attention branches.
        width = 2**(n_layers - 2)
        self.gap_fc = Spectralnorm(
            Linear(input_dim=ndf * width, output_dim=1, bias_attr=False))
        self.gmp_fc = Spectralnorm(
            Linear(input_dim=ndf * width, output_dim=1, bias_attr=False))
        self.conv1x1 = Conv2D(num_channels=ndf * width * 2,
                              num_filters=ndf * width,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.leaky_relu = LeakyReLU(alpha=0.2, inplace=True)

        # Final single-channel prediction conv.
        self.pad = ReflectionPad2d(pad=1)
        self.conv = Spectralnorm(
            Conv2D(num_channels=ndf * width,
                   num_filters=1,
                   filter_size=4,
                   stride=1,
                   padding=0,
                   bias_attr=False))

        self.model = Sequential(*layers)
예제 #3
0
    def __init__(self,
                 emb_size=128,
                 hidden_size=768,
                 n_layer=12,
                 voc_size=30522,
                 max_position_seq_len=512,
                 sent_types=2,
                 return_pooled_out=True,
                 initializer_range=1.0,
                 conv_type="conv_bn",
                 search_layer=False,
                 use_fp16=False,
                 use_fixed_gumbel=False,
                 gumbel_alphas=None):
        """BERT-style embedding stack plus a searchable encoder.

        Builds word/position/sentence embedding tables, a factorization
        projection from ``emb_size`` to ``hidden_size``, and an
        ``EncoderLayer`` (architecture-search variant using Gumbel sampling).

        Args:
            emb_size: width of the embedding tables.
            hidden_size: width the embeddings are projected to for the encoder.
            n_layer: number of encoder layers.
            voc_size: vocabulary size of the word-embedding table.
            max_position_seq_len: maximum sequence length (position table rows).
            sent_types: number of sentence/segment types.
            return_pooled_out: stored flag; presumably gates a pooled output in
                forward — confirm against the rest of the class.
            initializer_range: scale for the truncated-normal initializer.
            conv_type: stored encoder convolution type (e.g. "conv_bn").
            search_layer: forwarded to EncoderLayer (search-space flag).
            use_fp16: if True, embedding dtype is float16 instead of float32.
            use_fixed_gumbel: forwarded to EncoderLayer.
            gumbel_alphas: forwarded to EncoderLayer.
        """
        super(BertModelLayer, self).__init__()

        # Cache configuration on the instance.
        self._emb_size = emb_size
        self._hidden_size = hidden_size
        self._n_layer = n_layer
        self._voc_size = voc_size
        self._max_position_seq_len = max_position_seq_len
        self._sent_types = sent_types
        self.return_pooled_out = return_pooled_out

        self.use_fixed_gumbel = use_fixed_gumbel

        # Parameter names carry an "s_" prefix (distinguishes this variant's
        # parameters from a plain BERT layer's).
        self._word_emb_name = "s_word_embedding"
        self._pos_emb_name = "s_pos_embedding"
        self._sent_emb_name = "s_sent_embedding"
        self._dtype = "float16" if use_fp16 else "float32"

        self._conv_type = conv_type
        self._search_layer = search_layer
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=initializer_range)

        # Token embedding table: [voc_size, emb_size].
        self._src_emb = Embedding(size=[self._voc_size, self._emb_size],
                                  param_attr=fluid.ParamAttr(
                                      name=self._word_emb_name,
                                      initializer=self._param_initializer),
                                  dtype=self._dtype)

        # Position embedding table: [max_position_seq_len, emb_size].
        self._pos_emb = Embedding(
            size=[self._max_position_seq_len, self._emb_size],
            param_attr=fluid.ParamAttr(name=self._pos_emb_name,
                                       initializer=self._param_initializer),
            dtype=self._dtype)

        # Sentence/segment embedding table: [sent_types, emb_size].
        self._sent_emb = Embedding(size=[self._sent_types, self._emb_size],
                                   param_attr=fluid.ParamAttr(
                                       name=self._sent_emb_name,
                                       initializer=self._param_initializer),
                                   dtype=self._dtype)

        # ALBERT-style factorization: project narrow embeddings up to the
        # encoder's hidden width.
        self._emb_fac = Linear(
            input_dim=self._emb_size,
            output_dim=self._hidden_size,
            param_attr=fluid.ParamAttr(name="s_emb_factorization"))

        self._encoder = EncoderLayer(n_layer=self._n_layer,
                                     hidden_size=self._hidden_size,
                                     search_layer=self._search_layer,
                                     use_fixed_gumbel=self.use_fixed_gumbel,
                                     gumbel_alphas=gumbel_alphas)
예제 #4
0
파일: model.py 프로젝트: huangjin520/VGGNet
    def __init__(self, num_channels=3, out_dim=2):
        '''
        @Brief:
            `GoogLeNet` built from `Inception_v1` blocks.
            Note: input images should ideally be 224 * 224.
        @Parameters:
            num_channels : number of input image channels
            out_dim      : output dimensionality (number of classes)
        @Return:
            out          : main output (shape=(X, out_dim))
            out1         : output of auxiliary classifier 1 (shape=(X, out_dim))
            out2         : output of auxiliary classifier 2 (shape=(X, out_dim))
        @Examples:
        ------------
        >>> import numpy as np
        >>> data = np.ones(shape=(8, 3, 224, 224), dtype=np.float32) # pretend 8 three-channel images
        >>> with fluid.dygraph.guard():
                googlenet = GoogLeNet(out_dim=10)
                data = fluid.dygraph.to_variable(data)
                y, _, _ = googlenet(data)
                print(y.numpy().shape)
        (8, 10)
        '''

        super(GoogLeNet, self).__init__()

        # Stem part 1: 7x7/2 conv then 3x3/2 max pool.
        part1_list = [
            {
                'type': Conv2D,
                'num_channels': num_channels,
                'num_filters': 64,
                'filter_size': 7,
                'stride': 2,
                'padding': 3,
                'act': None,
                'bias_attr': False
            },
            {
                'type': Pool2D,
                'pool_size': 3,
                'pool_type': 'max',
                'pool_stride': 2,
                'pool_padding': 0,
                'global_pooling': False
            },
        ]

        # Stem part 2: 1x1 reduction then 3x3 conv to 192 channels.
        part2_list = [
            {
                'type': Conv2D,
                'num_channels': 64,
                'num_filters': 64,
                'filter_size': 1,
                'stride': 1,
                'padding': 0,
                'act': None,
                'bias_attr': False
            },
            {
                'type': Conv2D,
                'num_channels': 64,
                'num_filters': 192,
                'filter_size': 3,
                'stride': 1,
                'padding': 1,
                'act': None,
                'bias_attr': False
            },
        ]

        # Everything up to (and including) inception_4a.
        self.googLeNet_part1 = Sequential(
            ('part1', LinConPoo(part1_list)),
            ('BN1', BatchNorm(64)),
            ('part2', LinConPoo(part2_list)),
            ('BN2', BatchNorm(192)),
            ('MaxPool1', Pool2D(pool_size=3, pool_type='max', pool_stride=2)),
            ('inception_3a', Inception_v1(192, 64, 96, 128, 16, 32, 32)),
            ('inception_3b', Inception_v1(256, 128, 128, 192, 32, 96, 64)),
            ('MaxPool2', Pool2D(pool_size=3, pool_type='max', pool_stride=2)),
            ('inception_4a', Inception_v1(480, 192, 96, 208, 16, 48, 64)),
        )

        # `self.googLeNet_part1` ends after `inception_4a`; the first
        # auxiliary classifier taps the network here.
        self.auxiliary_classifier1_1 = LinConPoo([
            {
                'type': Pool2D,
                'pool_size': 5,
                'pool_type': 'avg',
                'pool_stride': 3,
                'pool_padding': 0,
                'global_pooling': False
            },
            {
                'type': Conv2D,
                'num_channels': 512,
                'num_filters': 128,
                'filter_size': 1,
                'stride': 1,
                'padding': 0,
                'act': None,
                'bias_attr': False
            },
        ])
        self.auxiliary_classifier1_fc1 = Linear(input_dim=128 * 3 * 3,
                                                output_dim=1024,
                                                act='relu',
                                                bias_attr=True)
        self.auxiliary_classifier1_fc2 = Linear(input_dim=1024,
                                                output_dim=out_dim,
                                                act='softmax',
                                                bias_attr=True)

        # Middle section, after the first auxiliary classifier.
        self.googLeNet_part2 = Sequential(
            # ('googLeNet_part1', self.googLeNet_part1),
            ('inception_4b', Inception_v1(512, 160, 112, 224, 24, 64, 64)),
            ('inception_4c', Inception_v1(512, 128, 128, 256, 24, 64, 64)),
            ('inception_4d', Inception_v1(512, 112, 144, 288, 32, 64, 64)),
        )

        # `self.googLeNet_part2` ends before `inception_4e`; the second
        # auxiliary classifier taps the network here.
        self.auxiliary_classifier2_1 = LinConPoo([
            {
                'type': Pool2D,
                'pool_size': 5,
                'pool_type': 'avg',
                'pool_stride': 3,
                'pool_padding': 0,
                'global_pooling': False
            },
            {
                'type': Conv2D,
                'num_channels': 512,
                'num_filters': 128,
                'filter_size': 1,
                'stride': 1,
                'padding': 0,
                'act': None,
                'bias_attr': False
            },
        ])
        self.auxiliary_classifier2_fc1 = Linear(input_dim=128 * 3 * 3,
                                                output_dim=1024,
                                                act='relu',
                                                bias_attr=True)
        self.auxiliary_classifier2_fc2 = Linear(input_dim=1024,
                                                output_dim=out_dim,
                                                act='softmax',
                                                bias_attr=True)

        # Tail section, after the second auxiliary classifier.
        self.googLeNet_part3 = Sequential(
            # ('googLeNet_part2', self.googLeNet_part2),
            ('inception_4e', Inception_v1(528, 256, 160, 320, 32, 128, 128)),
            ('MaxPool3', Pool2D(pool_size=3, pool_type='max', pool_stride=2)),
            ('inception_5a', Inception_v1(832, 256, 160, 320, 32, 128, 128)),
            ('inception_5b', Inception_v1(832, 384, 192, 384, 48, 128, 128)),
            # Bug fix: this layer is named 'AvgPool1' and GoogLeNet's head uses
            # global *average* pooling, but pool_type was 'max'.
            ('AvgPool1', Pool2D(pool_size=6, pool_type='avg', pool_stride=1)),
        )
        # `Sequential` cannot hold a dropout layer here, so the final FC stays
        # separate from the tail section.
        self.last_fc = Linear(1024, out_dim, act='softmax', bias_attr=True)
예제 #5
0
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        """UGATIT-style ResNet generator.

        Down-samples with two stride-2 convs, applies ``n_blocks`` ResNet
        blocks, predicts AdaILN gamma/beta from a CAM-style head, then
        up-samples back to ``output_nc`` channels.

        Args:
            input_nc: number of input image channels.
            output_nc: number of output image channels.
            ngf: base number of generator filters.
            n_blocks: number of ResNet blocks in each bottleneck.
            img_size: input image side length (sizes the non-light FC head).
            light: if True, the gamma/beta FC head takes pooled features
                (smaller); otherwise it flattens the full feature map.
        """
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()

        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        # Stem: 7x7 conv at stride 1 (reflection pad 3 keeps spatial size).
        DownBlock = []
        DownBlock += [
            ReflectionPad2d(pad=3),
            Conv2D(num_channels=input_nc,
                   num_filters=ngf,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            InstanceNorm(num_channels=ngf),
            ReLU(inplace=True)
        ]

        # Down-Sampling: two stride-2 convs, channels double each time.
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                ReflectionPad2d(pad=1),
                Conv2D(num_channels=ngf * mult,
                       num_filters=ngf * mult * 2,
                       filter_size=3,
                       stride=2,
                       padding=0,
                       bias_attr=False),
                InstanceNorm(num_channels=ngf * mult * 2),
                ReLU(inplace=True)
            ]

        # Down-Sampling Bottleneck: n_blocks residual blocks at ngf*4 width.
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map: global-avg / global-max attention branches,
        # then a 1x1 conv fusing both back to ngf*mult channels.
        self.gap_fc = Linear(input_dim=ngf * mult,
                             output_dim=1,
                             bias_attr=False)
        self.gmp_fc = Linear(input_dim=ngf * mult,
                             output_dim=1,
                             bias_attr=False)
        self.conv1x1 = Conv2D(num_channels=ngf * mult * 2,
                              num_filters=ngf * mult,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.relu = ReLU(inplace=True)

        # Gamma, Beta block: MLP that predicts AdaILN affine parameters.
        if self.light:
            # Light mode: operates on globally pooled features (ngf*mult in).
            FC = [
                Linear(input_dim=ngf * mult,
                       output_dim=ngf * mult,
                       bias_attr=False),
                ReLU(inplace=True),
                Linear(input_dim=ngf * mult,
                       output_dim=ngf * mult,
                       bias_attr=False),
                ReLU(True)
            ]
        else:
            # Full mode: flattens the (img_size/mult)^2 x ngf*mult feature map.
            FC = [
                Linear(input_dim=img_size // mult * img_size // mult * ngf *
                       mult,
                       output_dim=ngf * mult,
                       bias_attr=False),
                ReLU(inplace=True),
                Linear(input_dim=ngf * mult,
                       output_dim=ngf * mult,
                       bias_attr=False),
                ReLU(True)
            ]

        self.gamma = Linear(input_dim=ngf * mult,
                            output_dim=ngf * mult,
                            bias_attr=False)
        self.beta = Linear(input_dim=ngf * mult,
                           output_dim=ngf * mult,
                           bias_attr=False)

        # Up-Sampling Bottleneck: AdaILN residual blocks, registered by name
        # (UpBlock1_1 .. UpBlock1_n) so forward can look them up via getattr.
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i + 1),
                    ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling: nearest-style upsample + 3x3 conv halving channels.
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                Upsample(scale=2),
                ReflectionPad2d(pad=1),
                Conv2D(num_channels=ngf * mult,
                       num_filters=int(ngf * mult / 2),
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=False),
                ILN(int(ngf * mult / 2)),
                ReLU(True)
            ]

        # Output head: 7x7 conv to output_nc channels, tanh squashes to [-1, 1].
        UpBlock2 += [
            ReflectionPad2d(pad=3),
            Conv2D(num_channels=ngf,
                   num_filters=output_nc,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            Tanh()
        ]

        self.DownBlock = Sequential(*DownBlock)
        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)
    def __init__(self, param_attr=None, bias_attr=None):
        """Two-layer MLP: 784 -> 10 -> 10.

        Args:
            param_attr: parameter attribute forwarded to both Linear layers.
            bias_attr: bias attribute forwarded to both Linear layers.
        """
        super(MLP, self).__init__()

        # Bug fix: param_attr / bias_attr were accepted but silently ignored.
        # Forward them to the layers; the defaults (None) keep the original
        # behaviour for existing callers.
        self._fc1 = Linear(784, 10, param_attr=param_attr, bias_attr=bias_attr)
        self._fc2 = Linear(10, 10, param_attr=param_attr, bias_attr=bias_attr)
예제 #7
0
    def __init__(self, name_scope, use_poster, use_mov_title, use_mov_cat,
                 use_age_job):
        """MovieLens recommender: embeds user and movie features.

        Args:
            name_scope: name scope passed to the fluid Layer base class.
            use_poster: whether movie posters are used (also passed to MovieLen).
            use_mov_title: whether movie titles are used.
            use_mov_cat: whether movie categories are used.
            use_age_job: whether user age/job features are used.
        """
        super(Model, self).__init__(name_scope)
        name = self.full_name()

        # Store the passed-in name info and boolean flags on the model.
        self.use_mov_poster = use_poster
        self.use_mov_title = use_mov_title
        self.use_usr_age_job = use_age_job
        self.use_mov_cat = use_mov_cat

        # Load the dataset and build train/validation data iterators.
        Dataset = MovieLen(self.use_mov_poster)
        self.Dataset = Dataset
        self.trainset = self.Dataset.train_dataset
        self.valset = self.Dataset.valid_dataset
        self.train_loader = self.Dataset.load_data(dataset=self.trainset,
                                                   mode='train')
        self.valid_loader = self.Dataset.load_data(dataset=self.valset,
                                                   mode='valid')
        """ define network layer for embedding usr info """
        USR_ID_NUM = Dataset.max_usr_id + 1
        # Embed the user ID, followed by an FC layer.
        self.usr_emb = Embedding([USR_ID_NUM, 32], is_sparse=False)
        self.usr_fc = Linear(32, 32)

        # Embed the user gender, followed by an FC layer.
        USR_GENDER_DICT_SIZE = 2
        self.usr_gender_emb = Embedding([USR_GENDER_DICT_SIZE, 16])
        self.usr_gender_fc = Linear(16, 16)

        # Embed the user age, followed by an FC layer.
        USR_AGE_DICT_SIZE = Dataset.max_usr_age + 1
        self.usr_age_emb = Embedding([USR_AGE_DICT_SIZE, 16])
        self.usr_age_fc = Linear(16, 16)

        # Embed the user occupation, followed by an FC layer.
        USR_JOB_DICT_SIZE = Dataset.max_usr_job + 1
        self.usr_job_emb = Embedding([USR_JOB_DICT_SIZE, 16])
        self.usr_job_fc = Linear(16, 16)

        # FC layer that fuses all user features (32+16+16+16 = 80 inputs).
        self.usr_combined = Linear(80, 200, act='tanh')
        # NOTE(review): the bare string below says "usr info" but this section
        # builds *movie* features; left untouched since it is a statement.
        """ define network layer for embedding usr info """
        # Embed the movie ID, followed by an FC layer.
        MOV_DICT_SIZE = Dataset.max_mov_id + 1
        self.mov_emb = Embedding([MOV_DICT_SIZE, 32])
        self.mov_fc = Linear(32, 32)

        # Embed the movie category.
        CATEGORY_DICT_SIZE = len(Dataset.movie_cat) + 1
        self.mov_cat_emb = Embedding([CATEGORY_DICT_SIZE, 32], is_sparse=False)
        self.mov_cat_fc = Linear(32, 32)

        # Embed the movie title, then two conv layers over the title sequence.
        MOV_TITLE_DICT_SIZE = len(Dataset.movie_title) + 1
        self.mov_title_emb = Embedding([MOV_TITLE_DICT_SIZE, 32],
                                       is_sparse=False)
        self.mov_title_conv = Conv2D(1,
                                     1,
                                     filter_size=(3, 1),
                                     stride=(2, 1),
                                     padding=0,
                                     act='relu')
        self.mov_title_conv2 = Conv2D(1,
                                      1,
                                      filter_size=(3, 1),
                                      stride=1,
                                      padding=0,
                                      act='relu')

        # FC layer that fuses all movie features (32+32+32 = 96 inputs).
        self.mov_concat_embed = Linear(96, 200, act='tanh')
예제 #8
0
	def __init__(self, name_scope):
		"""MNIST regressor: a single fully connected layer producing one value.

		Args:
			name_scope: name scope passed to the fluid Layer base class.
		"""
		super(MNIST, self).__init__(name_scope)
		# Removed a dead local (`name_scope = self.full_name()` rebound the
		# parameter and was never used) and stale commented-out FC code.
		# One fully connected layer, output dim 1, no activation.
		self.fc = Linear(28 * 28, output_dim=1, act=None)
예제 #9
0
    def __init__(self, input_nc, ndf=64, n_layers=5):
        """Spectral-norm PatchGAN discriminator with a CAM head.

        Args:
            input_nc: number of input image channels.
            ndf: base number of discriminator filters.
            n_layers: total number of conv stages.
        """
        super(Discriminator, self).__init__()

        # Stem: reflection pad -> strided 4x4 conv -> leaky relu.
        body = [
            ReflectionPad2D(1),
            Spectralnorm(
                Conv2D(input_nc,
                       ndf,
                       filter_size=4,
                       stride=2,
                       padding=0,
                       bias_attr=True)),
            leaky_relu(0.2)
        ]

        # Strided stages: feature width doubles at each step.
        for stage in range(1, n_layers - 2):
            ch = 2**(stage - 1)
            body.extend([
                ReflectionPad2D(1),
                Spectralnorm(
                    Conv2D(ndf * ch,
                           ndf * ch * 2,
                           filter_size=4,
                           stride=2,
                           padding=0,
                           bias_attr=True)),
                leaky_relu(0.2)
            ])

        # Last doubling stage runs at stride 1.
        ch = 2**(n_layers - 2 - 1)
        body.extend([
            ReflectionPad2D(1),
            Spectralnorm(
                Conv2D(ndf * ch,
                       ndf * ch * 2,
                       filter_size=4,
                       stride=1,
                       padding=0,
                       bias_attr=True)),
            leaky_relu(0.2)
        ])

        # Class Activation Map: global-avg / global-max attention branches.
        ch = 2**(n_layers - 2)
        self.gap_fc = Spectralnorm(Linear(ndf * ch, 1, bias_attr=False))
        self.gmp_fc = Spectralnorm(Linear(ndf * ch, 1, bias_attr=False))
        self.conv1x1 = Conv2D(ndf * ch * 2,
                              ndf * ch,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.leaky_relu = leaky_relu(0.2)

        # Final single-channel prediction conv.
        self.pad = ReflectionPad2D(1)
        self.conv = Spectralnorm(
            Conv2D(ndf * ch,
                   1,
                   filter_size=4,
                   stride=1,
                   padding=0,
                   bias_attr=False))

        self.model = fluid.dygraph.Sequential(*body)
예제 #10
0
    def __init__(self, input_nc, ndf=64, n_layers=5):
        """Spectral-norm PatchGAN discriminator with explicit bias initializers.

        Same topology as the other Discriminator variants in this file, but
        each conv bias is initialized uniformly in +/- 1/sqrt(fan_in), where
        fan_in = in_channels * 16 (i.e. in_channels * 4*4 for the 4x4 filters)
        — presumably mirroring a framework's default bias init; confirm.

        Args:
            input_nc: number of input image channels.
            ndf: base number of discriminator filters.
            n_layers: total number of conv stages.
        """
        super(Discriminator, self).__init__()
        # Stem: reflection pad -> strided 4x4 conv -> LeakyReLU.
        model = [
            ReflectionPad2D(1),
            spectral_norm(
                Conv2D(input_nc,
                       ndf,
                       filter_size=4,
                       stride=2,
                       padding=0,
                       bias_attr=fluid.ParamAttr(
                           initializer=fluid.initializer.Uniform(
                               low=-1 / math.sqrt(input_nc * 16),
                               high=1 / math.sqrt(input_nc * 16))))),
            LeakyReLU(0.2, False)
        ]

        # Strided stages: channel count doubles at every step.
        for i in range(1, n_layers - 2):
            mult = 2**(i - 1)
            model += [
                ReflectionPad2D(1),
                spectral_norm(
                    Conv2D(ndf * mult,
                           ndf * mult * 2,
                           filter_size=4,
                           stride=2,
                           padding=0,
                           bias_attr=fluid.ParamAttr(
                               initializer=fluid.initializer.Uniform(
                                   low=-1 / math.sqrt(ndf * mult * 16),
                                   high=1 / math.sqrt(ndf * mult * 16))))),
                LeakyReLU(0.2, False)
            ]

        # Last doubling stage runs at stride 1.
        mult = 2**(n_layers - 2 - 1)
        model += [
            ReflectionPad2D(1),
            spectral_norm(
                Conv2D(ndf * mult,
                       ndf * mult * 2,
                       filter_size=4,
                       stride=1,
                       padding=0,
                       bias_attr=fluid.ParamAttr(
                           initializer=fluid.initializer.Uniform(
                               low=-1 / math.sqrt(ndf * mult * 16),
                               high=1 / math.sqrt(ndf * mult * 16))))),
            LeakyReLU(0.2, False)
        ]

        # Class Activation Map: global-avg / global-max attention branches,
        # then a 1x1 conv fusing both back to ndf*mult channels.
        mult = 2**(n_layers - 2)
        self.gap_fc = spectral_norm(Linear(ndf * mult, 1, bias_attr=False))
        self.gmp_fc = spectral_norm(Linear(ndf * mult, 1, bias_attr=False))
        self.conv1x1 = Conv2D(
            ndf * mult * 2,
            ndf * mult,
            filter_size=1,
            stride=1,
            # 1x1 filter: fan_in is just the input channel count (ndf*mult*2).
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-1 / math.sqrt(ndf * mult * 2),
                high=1 / math.sqrt(ndf * mult * 2))))
        self.leaky_relu = LeakyReLU(0.2, False)

        # Final single-channel prediction conv.
        self.pad = ReflectionPad2D(1)
        self.conv = spectral_norm(
            Conv2D(ndf * mult,
                   1,
                   filter_size=4,
                   stride=1,
                   padding=0,
                   bias_attr=False))

        self.model = Sequential(*model)
예제 #11
0
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        """UGATIT-style ResNet generator with an unrolled down-sampling path.

        Args:
            input_nc: number of input image channels.
            output_nc: number of output image channels.
            ngf: base number of generator filters.
            n_blocks: number of up-sampling bottleneck blocks.
                NOTE(review): the down-sampling bottleneck is hard-wired to
                four blocks (DownBlock1..DownBlock4) — presumably referenced
                by name in forward, so that count is left unchanged.
            img_size: input image side length (sizes the non-light FC head).
            light: if True, gamma/beta FC head works on pooled features.
        """
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        # Fix: the channel counts below were hard-coded (3, 64, 128, 256),
        # silently ignoring input_nc and ngf; they are now derived from the
        # parameters. Defaults (3-channel input, ngf=64) are unchanged.
        # Stem: 7x7 conv at stride 1 (reflection pad 3 keeps spatial size).
        self.DownBlock1_1 = ReflectionPad2D(3)
        self.DownBlock1_2 = Conv2D(input_nc,
                                   ngf,
                                   filter_size=7,
                                   stride=1,
                                   padding=0,
                                   bias_attr=False)
        self.DownBlock1_4 = ReLU(False)

        # Down-sampling stage 1: stride-2 conv, channels double.
        self.DownBlock2_1 = ReflectionPad2D(1)
        self.DownBlock2_2 = Conv2D(ngf,
                                   ngf * 2,
                                   filter_size=3,
                                   stride=2,
                                   padding=0,
                                   bias_attr=False)
        self.DownBlock2_4 = ReLU(False)

        # Down-sampling stage 2: stride-2 conv, channels double again.
        self.DownBlock3_1 = ReflectionPad2D(1)
        self.DownBlock3_2 = Conv2D(ngf * 2,
                                   ngf * 4,
                                   filter_size=3,
                                   stride=2,
                                   padding=0,
                                   bias_attr=False)
        self.DownBlock3_4 = ReLU(False)
        n_downsampling = 2
        # Channel multiplier after the two stride-2 convs (2**2 == 4).
        mult = 2**n_downsampling
        # Down-Sampling Bottleneck (fixed at four blocks; see docstring note).
        self.DownBlock1 = ResnetBlock(ngf * mult, use_bias=False)
        self.DownBlock2 = ResnetBlock(ngf * mult, use_bias=False)
        self.DownBlock3 = ResnetBlock(ngf * mult, use_bias=False)
        self.DownBlock4 = ResnetBlock(ngf * mult, use_bias=False)
        # Class Activation Map: global-avg / global-max attention branches.
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False)

        # 1x1 conv fusing both CAM branches; bias init in +/- 1/sqrt(fan_in).
        self.conv1x1 = Conv2D(
            ngf * mult * 2,
            ngf * mult,
            filter_size=1,
            stride=1,
            bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
                low=-1 / math.sqrt(ngf * mult * 2),
                high=1 / math.sqrt(ngf * mult * 2))))
        self.relu = ReLU(False)

        # Gamma, Beta block: MLP predicting AdaILN affine parameters.
        if self.light:
            FC = [
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                ReLU(False),
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                ReLU(False)
            ]
        else:
            # Full mode flattens the (img_size/mult)^2 x ngf*mult feature map.
            FC = [
                Linear(img_size // mult * img_size // mult * ngf * mult,
                       ngf * mult,
                       bias_attr=False),
                ReLU(False),
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                ReLU(False)
            ]
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck: AdaILN residual blocks, registered by name
        # (UpBlock1_1 .. UpBlock1_n) so forward can fetch them via getattr.
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i + 1),
                    ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling: nearest-neighbour upsample + 3x3 conv halving channels.
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                Upsample(scales=2, resamples='NEAREST'),
                ReflectionPad2D(1),
                Conv2D(ngf * mult,
                       int(ngf * mult / 2),
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=False),
                ILN(int(ngf * mult / 2)),
                ReLU(False)
            ]

        # Output head: 7x7 conv to output_nc channels, tanh squashes to [-1, 1].
        UpBlock2 += [
            ReflectionPad2D(3),
            Conv2D(ngf,
                   output_nc,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            Tanh()
        ]

        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)
예제 #12
0
 def __init__(self, d_inner_hid, d_model, dropout_rate):
     """Position-wise feed-forward block: d_model -> d_inner_hid (ReLU) -> d_model.

     Args:
         d_inner_hid: width of the inner (expansion) layer.
         d_model: model width (input and output size).
         dropout_rate: stored dropout probability; presumably applied between
             the two layers in forward — confirm against the class body.
     """
     super(FFN, self).__init__()
     self.dropout_rate = dropout_rate
     # Expansion with ReLU, then contraction back to the model width.
     self.fc1 = Linear(input_dim=d_model, output_dim=d_inner_hid, act="relu")
     self.fc2 = Linear(input_dim=d_inner_hid, output_dim=d_model)
예제 #13
0
 def __init__(self):
     """Linear regressor: 13 input features -> 1 output, no activation."""
     super(Regressor, self).__init__()
     # Single fully connected layer; act=None keeps the output purely linear.
     self.fc = Linear(input_dim=13, output_dim=1, act=None)
예제 #14
0
    def __init__(self, config, return_pooled_out=True, use_fp16=False):
        """BERT model layer: embeddings, pooled-output FC, and the encoder.

        Args:
            config: dict with keys 'hidden_size', 'num_hidden_layers',
                'num_attention_heads', 'vocab_size', 'max_position_embeddings',
                'type_vocab_size', 'hidden_act', 'hidden_dropout_prob',
                'attention_probs_dropout_prob', 'initializer_range'.
            return_pooled_out: stored flag; presumably gates the pooled output
                in forward — confirm against the rest of the class.
            use_fp16: if True, embedding dtype is float16 instead of float32.
        """
        super(BertModelLayer, self).__init__()

        # Unpack the configuration dict onto the instance.
        self._emb_size = config['hidden_size']
        self._n_layer = config['num_hidden_layers']
        self._n_head = config['num_attention_heads']
        self._voc_size = config['vocab_size']
        self._max_position_seq_len = config['max_position_embeddings']
        self._sent_types = config['type_vocab_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']
        self._attention_dropout = config['attention_probs_dropout_prob']
        self.return_pooled_out = return_pooled_out

        self._word_emb_name = "word_embedding"
        self._pos_emb_name = "pos_embedding"
        self._sent_emb_name = "sent_embedding"
        self._dtype = "float16" if use_fp16 else "float32"

        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])

        # Token embedding table: [vocab_size, hidden_size].
        self._src_emb = Embedding(size=[self._voc_size, self._emb_size],
                                  param_attr=fluid.ParamAttr(
                                      name=self._word_emb_name,
                                      initializer=self._param_initializer),
                                  dtype=self._dtype)

        # Position embedding table: [max_position_embeddings, hidden_size].
        self._pos_emb = Embedding(
            size=[self._max_position_seq_len, self._emb_size],
            param_attr=fluid.ParamAttr(name=self._pos_emb_name,
                                       initializer=self._param_initializer),
            dtype=self._dtype)

        # Sentence/segment embedding table: [type_vocab_size, hidden_size].
        self._sent_emb = Embedding(size=[self._sent_types, self._emb_size],
                                   param_attr=fluid.ParamAttr(
                                       name=self._sent_emb_name,
                                       initializer=self._param_initializer),
                                   dtype=self._dtype)

        # Tanh FC producing BERT's pooled ([CLS]) representation.
        self.pooled_fc = Linear(input_dim=self._emb_size,
                                output_dim=self._emb_size,
                                param_attr=fluid.ParamAttr(
                                    name="pooled_fc.w_0",
                                    initializer=self._param_initializer),
                                bias_attr="pooled_fc.b_0",
                                act="tanh")

        # Pre-encoder processing ("nd" = normalize + dropout, per the cmd
        # string convention used by PrePostProcessLayer).
        self.pre_process_layer = PrePostProcessLayer(
            "nd", self._emb_size, self._prepostprocess_dropout, "")

        # Transformer encoder stack; head dim = hidden / heads, FFN = 4x hidden.
        self._encoder = EncoderLayer(
            hidden_act=self._hidden_act,
            n_layer=self._n_layer,
            n_head=self._n_head,
            d_key=self._emb_size // self._n_head,
            d_value=self._emb_size // self._n_head,
            d_model=self._emb_size,
            d_inner_hid=self._emb_size * 4,
            prepostprocess_dropout=self._prepostprocess_dropout,
            attention_dropout=self._attention_dropout,
            relu_dropout=0,
            preprocess_cmd="",
            postprocess_cmd="dan",
            param_initializer=self._param_initializer)
예제 #15
0
 def __init__(self):
     """Housing-price regressor: maps 13 features to a single prediction."""
     super(Regressor, self).__init__()

     # One fully connected layer, output dim 1, no activation function.
     self.fc = Linear(input_dim=13, output_dim=1, act=None)
    def __init__(self, layers=50, class_dim=101):
        """Build a 3D ResNet backbone.

        Args:
            layers: network depth; one of 18, 34, 50, 101, 152.
            class_dim: number of classes for the final softmax FC head.
        """
        super(ResNet3D, self).__init__()
        self.layers = layers
        supported_layers = [18, 34, 50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)

        # Residual units per stage for each supported depth.
        depth_by_layers = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }
        depth = depth_by_layers[layers]
        use_bottleneck = layers >= 50
        # Bottleneck variants (>=50) enter each stage with 4x-expanded
        # channels from the previous stage; basic variants do not.
        if use_bottleneck:
            num_channels = [64, 256, 512, 1024]
        else:
            num_channels = [64, 64, 128, 256]
        num_filters = [64, 128, 256, 512]

        # Stem conv: 7x7 filter, stride 1 temporally and 2 spatially.
        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=[1, 2, 2],
                                act="relu",
                                name="conv1")

        self.block_list = []
        for stage, units in enumerate(depth):
            shortcut = False
            for unit in range(units):
                # Caffe-style naming: deep nets (101/152) label stage 2
                # as "res4a", "res4b1", ...; otherwise "res{n}a/b/c...".
                if use_bottleneck and layers in [101, 152] and stage == 2:
                    if unit == 0:
                        conv_name = "res" + str(stage + 2) + "a"
                    else:
                        conv_name = "res" + str(stage + 2) + "b" + str(unit)
                else:
                    conv_name = "res" + str(stage + 2) + chr(97 + unit)
                # First unit of a stage takes the stage's input width and
                # (except stage 0) downsamples with stride 2.
                stride = 2 if unit == 0 and stage != 0 else 1
                if use_bottleneck:
                    in_ch = (num_channels[stage] if unit == 0
                             else num_filters[stage] * 4)
                    unit_layer = BottleneckBlock(num_channels=in_ch,
                                                 num_filters=num_filters[stage],
                                                 stride=stride,
                                                 shortcut=shortcut,
                                                 name=conv_name)
                else:
                    in_ch = (num_channels[stage] if unit == 0
                             else num_filters[stage])
                    unit_layer = BasicBlock(num_channels=in_ch,
                                            num_filters=num_filters[stage],
                                            stride=stride,
                                            shortcut=shortcut,
                                            name=conv_name)
                self.block_list.append(
                    self.add_sublayer(conv_name, unit_layer))
                shortcut = True

        # Classifier head; width matches the last stage's output channels.
        fc_in_channel = num_channels[-1] * 2
        stdv = 1.0 / math.sqrt(fc_in_channel * 1.0)
        self.out = Linear(fc_in_channel,
                          class_dim,
                          act='softmax',
                          param_attr=ParamAttr(
                              initializer=fluid.initializer.Uniform(
                                  -stdv, stdv),
                              name="fc_0.w_0"),
                          bias_attr=ParamAttr(name="fc_0.b_0"))
 def __init__(self, d_inner_hid, d_hid, dropout_rate):
     """Position-wise feed-forward block.

     Expands d_hid -> d_inner_hid with ReLU, then contracts back to
     d_hid; dropout_rate is stored for use during the forward pass.
     """
     super(PositionwiseFeedForwardLayer, self).__init__()
     self._dropout_rate = dropout_rate
     # Expansion layer (with ReLU) followed by the contraction layer.
     self._i2h = Linear(d_hid, d_inner_hid, act="relu")
     self._h2o = Linear(d_inner_hid, d_hid)
Example #18
0
    def __init__(self,
                 Data,
                 embedding_weight,
                 gru_steps=10,
                 gru_num_layers=1,
                 init_scale=0.1):
        """Build the DNN+GRU recommendation model.

        Args:
            Data: dataset descriptor providing vocabulary sizes
                (user/gender/age/city/item).
            embedding_weight: pretrained item-embedding matrix used to
                initialize (and freeze) the item embedding.
            gru_steps: number of time steps the GRU unrolls over.
            gru_num_layers: number of stacked GRU layers.
            init_scale: half-width of the uniform init for the softmax
                projection parameters.
        """
        super(Model_2_dnngru, self).__init__()
        self.init_scale = init_scale
        self.Data = Data

        # User-side features: one embedding plus a small FC per feature.
        usr_id_vocab = self.Data.user_id_size
        self.usr_id_emb = Embedding([usr_id_vocab, 32])
        self.usr_fc = Linear(32, 32)

        gender_vocab = self.Data.user_gender_size + 1
        self.usr_gender_emb = Embedding([gender_vocab, 4])
        self.usr_gender_fc = Linear(4, 4)

        age_level_vocab = self.Data.user_age_level_size + 1
        self.usr_age_emb = Embedding([age_level_vocab, 16])
        self.usr_age_fc = Linear(16, 16)

        city_level_vocab = self.Data.user_city_level_size + 1
        self.usr_city_emb = Embedding([city_level_vocab, 16])
        self.usr_city_fc = Linear(16, 16)

        # Item embedding initialized from the pretrained matrix; frozen
        # (trainable=False) with a reduced learning-rate multiplier.
        item_vocab = self.Data.item_id_size
        pretrained_item_attr = fluid.ParamAttr(
            learning_rate=0.5,
            initializer=fluid.initializer.NumpyArrayInitializer(
                embedding_weight),
            trainable=False)
        self.itm_id_emb = Embedding([item_vocab, 200],
                                    param_attr=pretrained_item_attr)

        self.click_fc = Linear(1, 16)
        # Combined feature width: (32+4+16+16)+(200+128+128)+16 = 540.
        #         self.all_combined = Linear(540, 512, act='tanh')

        # GRU over the interaction sequence. At prediction time the last
        # step's output is used; sequences are zero-padded on the left
        # (padding precedes the real elements).
        self.gru_steps = gru_steps
        self.gru_num_layers = gru_num_layers
        self.gru_hidden_size = 540
        self.simple_gru_rnn = SimpleGRURNN(hidden_size=self.gru_hidden_size,
                                           num_steps=self.gru_steps,
                                           num_layers=self.gru_num_layers,
                                           init_scale=0.1,
                                           dropout=None)

        # Projection from the GRU hidden state to per-item logits.
        self.softmax_weight = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.gru_hidden_size, item_vocab],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))
        self.softmax_bias = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[item_vocab],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))

        #         self.gru_fc = Linear(self.gru_hidden_size, 512, act='sigmoid')
        self.gru_fc_out = Linear(self.gru_hidden_size, item_vocab)
Example #19
0
 def __init__(self, in_size, out_size):
     """Two parallel linear layers with identical in/out sizes.

     Each branch gets its own independently-initialized weights.
     """
     super(LinearNetMultiInput, self).__init__()
     # One Linear per input branch.
     self._linear1 = Linear(in_size, out_size)
     self._linear2 = Linear(in_size, out_size)
Example #20
0
 def __init__(self):
     """Four-layer MLP: three 100-wide ReLU layers and a softmax head."""
     super(MyDNN, self).__init__()
     hidden_width = 100
     self.hidden1 = Linear(hidden_width, hidden_width, act='relu')
     self.hidden2 = Linear(hidden_width, hidden_width, act='relu')
     self.hidden3 = Linear(hidden_width, hidden_width, act='relu')
     # NOTE(review): input dim 3*100*100 does not match hidden3's 100-wide
     # output unless forward() reshapes/flattens the raw image instead —
     # confirm against the forward pass.
     self.hidden4 = Linear(3 * 100 * 100, 10, act='softmax')