Example #1
 def no_detach_single(self):
     data = self.generate_Data()
     with fluid.dygraph.guard():
         linear_w_param_attrs = fluid.ParamAttr(
             initializer=fluid.initializer.Constant(5.0))
         linear_b_param_attrs = fluid.ParamAttr(
             initializer=fluid.initializer.Constant(6.0))
         linear = Linear(4,
                         10,
                         param_attr=linear_w_param_attrs,
                         bias_attr=linear_b_param_attrs)
         linear1_w_param_attrs = fluid.ParamAttr(
             initializer=fluid.initializer.Constant(7.0))
         linear1_b_param_attrs = fluid.ParamAttr(
             initializer=fluid.initializer.Constant(8.0))
         linear1 = Linear(10,
                          1,
                          param_attr=linear1_w_param_attrs,
                          bias_attr=linear1_b_param_attrs)
         data = to_variable(data)
         x = linear(data)
         x1 = linear1(x)
         loss = x1
         # print(loss, loss.shape)
         loss.backward()
         return x.gradient()
Example #2
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        model = [ReflectionPad2d([1,1,1,1]),
                # TODO: spectral normalization
                 Conv2D(input_nc, ndf, filter_size=4, stride=2, padding=0, bias_attr=True),
                 LeakyReLU(0.2, True)]

        for i in range(1, n_layers - 2):
            mult = 2 ** (i - 1)
            model += [ReflectionPad2d([1,1,1,1]),
                      Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=2, padding=0, bias_attr=True),
                      LeakyReLU(0.2, True)]

        mult = 2 ** (n_layers - 2 - 1)
        model += [ReflectionPad2d([1,1,1,1]),
                  Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=1, padding=0, bias_attr=True),
                  LeakyReLU(0.2, True)]

        # Class Activation Map
        mult = 2 ** (n_layers - 2)
        self.gap_fc = Linear(ndf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ndf * mult, 1, bias_attr=False)
        self.conv1x1 = Conv2D(ndf * mult * 2, ndf * mult, filter_size=1, stride=1, bias_attr=True)
        self.leaky_relu = LeakyReLU(0.2, True)

        self.pad = ReflectionPad2d([1,1,1,1])
        self.conv = Conv2D(ndf * mult, 1, filter_size=4, stride=1, padding=0, bias_attr=False)

        self.model = Sequential(*model)
Example #3
    def __init__(self, in_nc=64, out_nc=64, light=True, use_bias=True):
        super(MLP, self).__init__()
        # ops for  Gamma, Beta block
        self.light = light

        FC = [
            Linear(in_nc,
                   out_nc,
                   param_attr=init_w(),
                   bias_attr=init_bias(use_bias),
                   act='relu'),
            Linear(out_nc,
                   out_nc,
                   param_attr=init_w(),
                   bias_attr=init_bias(use_bias),
                   act='relu')
        ]

        self.gamma = Linear(out_nc,
                            out_nc,
                            param_attr=init_w(),
                            bias_attr=init_bias(use_bias))  # FC256
        self.beta = Linear(out_nc,
                           out_nc,
                           param_attr=init_w(),
                           bias_attr=init_bias(use_bias))  # FC256
        self.FC = Sequential(*FC)
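The forward pass is not part of this snippet; the body below is a hypothetical minimal sketch (an assumption, and it ignores whatever input pooling the self.light flag controls in the original model) of how the shared FC stack could feed the two heads.

    # Hypothetical forward sketch, not from the original source; it omits any
    # self.light-dependent pooling of the input feature.
    def forward(self, x):
        x = self.FC(x)           # two relu-activated Linear layers
        gamma = self.gamma(x)    # per-channel scale (FC256)
        beta = self.beta(x)      # per-channel shift (FC256)
        return gamma, beta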
Example #4
 def __init__(self, d_inner_hid, d_model, dropout_rate):
     super(FFN, self).__init__()
     self.dropout_rate = dropout_rate
     self.fc1 = Linear(input_dim=d_model,
                       output_dim=d_inner_hid,
                       act="relu")
     self.fc2 = Linear(input_dim=d_inner_hid, output_dim=d_model)
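The snippet stops at __init__; below is a minimal forward sketch (an assumption, not the original code) of the usual position-wise FFN pattern, applying the stored dropout_rate between the two projections.

 # Hypothetical forward sketch, not from the original source.
 def forward(self, x):
     hidden = self.fc1(x)      # d_model -> d_inner_hid, relu
     if self.dropout_rate:
         hidden = fluid.layers.dropout(hidden, dropout_prob=self.dropout_rate)
     return self.fc2(hidden)   # d_inner_hid -> d_model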
Example #5
 def __init__(self):
     super(CNN_RNN_Model4, self).__init__()
     self.EfficientNet = CNNEnoder()
     self.RNN = ConvBLSTM(in_channels=512, hidden_channels=64, kernel_size=(3, 3), num_layers=1)
     self.fc1 = Linear(1280, 512, param_attr=ParamAttr(initializer=fluid.initializer.XavierInitializer()))
     self.fc2 = Linear(512, 256, param_attr=ParamAttr(initializer=fluid.initializer.XavierInitializer()))
     self.fc3 = Linear(256, 2, param_attr=ParamAttr(initializer=fluid.initializer.XavierInitializer()))
Example #6
    def __init__(self, num_classes=1):
        super(CNN_LeakyRelu, self).__init__()

        self.conv1 = Conv2D(3, 64, 5, padding=2, stride=1, act='leaky_relu')
        self.bn1 = BatchNorm(64)
        self.conv2 = Conv2D(64, 128, 5, padding=2, stride=1, act='leaky_relu')
        self.bn2 = BatchNorm(128)
        self.conv3 = Conv2D(128, 256, 5, padding=2, stride=1, act='leaky_relu')
        self.bn3 = BatchNorm(256)
        self.conv4 = Conv2D(256, 512, 5, padding=2, stride=1, act='leaky_relu')
        self.bn4 = BatchNorm(512)
        self.conv5 = Conv2D(512,
                            1024,
                            5,
                            padding=2,
                            stride=1,
                            act='leaky_relu')
        self.bn5 = BatchNorm(1024)
        self.conv6 = Conv2D(1024,
                            1024,
                            5,
                            padding=2,
                            stride=1,
                            act='leaky_relu')
        self.bn6 = BatchNorm(1024)

        self.fc1 = Linear(1024 * 7 * 7, 1024, act='leaky_relu')
        self.fc2 = Linear(1024, num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
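Only __init__ is shown; here is a minimal forward sketch (an assumption) that matches the layer sizes, assuming a 448x448 input so that six stride-2 poolings leave the 7x7x1024 map expected by fc1.

    # Hypothetical forward sketch, not from the original source.
    def forward(self, x):
        x = self.pool_down(self.bn1(self.conv1(x)))
        x = self.pool_down(self.bn2(self.conv2(x)))
        x = self.pool_down(self.bn3(self.conv3(x)))
        x = self.pool_down(self.bn4(self.conv4(x)))
        x = self.pool_down(self.bn5(self.conv5(x)))
        x = self.pool_down(self.bn6(self.conv6(x)))
        x = fluid.layers.reshape(x, [x.shape[0], -1])   # [N, 1024 * 7 * 7]
        x = self.fc1(x)
        return self.fc2(x)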
Example #7
    def __init__(self,
                 hidden_act,
                 d_inner_hid,
                 d_model,
                 dropout_rate,
                 param_initializer=None,
                 name=""):
        super(PositionwiseFeedForwardLayer, self).__init__()

        self._i2h = Linear(input_dim=d_model,
                           output_dim=d_inner_hid,
                           param_attr=fluid.ParamAttr(
                               name=name + '_fc_0.w_0',
                               initializer=param_initializer),
                           bias_attr=name + '_fc_0.b_0',
                           act=hidden_act)

        self._h2o = Linear(input_dim=d_inner_hid,
                           output_dim=d_model,
                           param_attr=fluid.ParamAttr(
                               name=name + '_fc_1.w_0',
                               initializer=param_initializer),
                           bias_attr=name + '_fc_1.b_0')

        self._dropout_rate = dropout_rate
Example #8
 def __init__(self, name_scope):
     super(Regressor, self).__init__(name_scope)
     name_scope = self.full_name()
     # Define the fully connected layers; the final one outputs dim 1 with act=None (no activation)
     self.fc = Linear(input_dim=13, output_dim=8, act='relu')
     self.sc = Linear(input_dim=8, output_dim=4, act='relu')
     self.tc = Linear(input_dim=4, output_dim=1, act=None)
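The forward pass is not shown; a minimal sketch (an assumption) that simply chains the three layers defined above:

 # Hypothetical forward sketch, not from the original source.
 def forward(self, inputs):
     x = self.fc(inputs)   # 13 -> 8, relu
     x = self.sc(x)        # 8 -> 4, relu
     return self.tc(x)     # 4 -> 1, no activation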
Example #9
    def __init__(self, layers=16, use_bn=False, num_classes=1000):
        super(VGG, self).__init__()
        self.layers = layers
        self.use_bn = use_bn
        supported_layers = [16, 19]
        assert layers in supported_layers

        if layers == 16:
            depth = [2, 2, 3, 3, 3]
        elif layers == 19:
            depth = [2, 2, 4, 4, 4]

        num_channels = [3, 64, 128, 256, 512]
        num_filters = [64, 128, 256, 512, 512]

        self.layer1 = fluid.dygraph.Sequential(*self.make_layer(num_channels[0], num_filters[0], depth[0], use_bn, name='layer1'))
        self.layer2 = fluid.dygraph.Sequential(*self.make_layer(num_channels[1], num_filters[1], depth[1], use_bn, name='layer2'))
        self.layer3 = fluid.dygraph.Sequential(*self.make_layer(num_channels[2], num_filters[2], depth[2], use_bn, name='layer3'))
        self.layer4 = fluid.dygraph.Sequential(*self.make_layer(num_channels[3], num_filters[3], depth[3], use_bn, name='layer4'))
        self.layer5 = fluid.dygraph.Sequential(*self.make_layer(num_channels[4], num_filters[4], depth[4], use_bn, name='layer5'))

        self.classifier = fluid.dygraph.Sequential(
                Linear(input_dim=512 * 7 * 7, output_dim=4096, act='relu'),
                Dropout(),
                Linear(input_dim=4096, output_dim=4096, act='relu'),
                Dropout(),
                Linear(input_dim=4096, output_dim=num_classes))
                
        self.out_dim = 512 * 7 * 7
Example #10
 def __init__(self):
     super(MyDNN, self).__init__()
     self.hidden1 = Linear(3 * 224 * 224, 1000, act='relu')
     self.hidden2 = Linear(1000, 700, act='relu')
     self.hidden22 = Linear(700, 400, act='relu')
     self.hidden3 = Linear(400, 100, act='relu')
     self.out = Linear(100, 25, act='softmax')
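Only the layers are defined here; below is a minimal forward sketch (an assumption) that flattens a 3x224x224 image batch before the stack of Linear layers.

 # Hypothetical forward sketch, not from the original source.
 def forward(self, inputs):
     x = fluid.layers.reshape(inputs, [-1, 3 * 224 * 224])
     x = self.hidden1(x)
     x = self.hidden2(x)
     x = self.hidden22(x)
     x = self.hidden3(x)
     return self.out(x)    # softmax over 25 classes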
Example #11
 def __init__(self,
              d_key,
              d_value,
              d_model,
              n_head=1,
              dropout_rate=0.,
              param_initializer=None):
     super(MultiHeadAttention, self).__init__()
     self.n_head = n_head
     self.d_key = d_key
     self.d_value = d_value
     self.d_model = d_model
     self.dropout_rate = dropout_rate
     self.q_fc = Linear(
         input_dim=d_model,
         output_dim=d_key * n_head,
         bias_attr=False,
         param_attr=fluid.ParamAttr(initializer=param_initializer))
     self.k_fc = Linear(
         input_dim=d_model,
         output_dim=d_key * n_head,
         bias_attr=False,
         param_attr=fluid.ParamAttr(initializer=param_initializer))
     self.v_fc = Linear(
         input_dim=d_model,
         output_dim=d_value * n_head,
         bias_attr=False,
         param_attr=fluid.ParamAttr(initializer=param_initializer))
     self.proj_fc = Linear(
         input_dim=d_value * n_head,
         output_dim=d_model,
         bias_attr=False,
         param_attr=fluid.ParamAttr(initializer=param_initializer))
Example #12
	def __init__(self):
		super(MyDNN, self).__init__()
		self.hidden1 = Linear(100, 100, act="relu")
		self.hidden2 = Linear(100, 100, act="relu")
		self.hidden3 = Linear(100, 100, act="relu")
		self.hidden4 = Linear(100, 100, act="relu")
		self.hidden5 = Linear(3*100*100, 10, act='softmax')
Example #13
    def bi_dynamic_lstm(self, input, hidden_size):
        """
        bi_lstm layer
        """
        fw_in_proj = Linear(
            input_dim=self.emb_size,
            output_dim=4 * hidden_size,
            param_attr=fluid.ParamAttr(name="fw_fc.w"),
            bias_attr=False)
        fw_in_proj = fw_in_proj(input)

        forward = pd_layers.DynamicLSTMLayer(
            size=4 * hidden_size,
            is_reverse=False,
            param_attr=fluid.ParamAttr(name="forward_lstm.w"),
            bias_attr=fluid.ParamAttr(name="forward_lstm.b")).ops()

        forward = forward(fw_in_proj)

        rv_in_proj = Linear(
            input_dim=self.emb_size,
            output_dim=4 * hidden_size,
            param_attr=fluid.ParamAttr(name="rv_fc.w"),
            bias_attr=False)
        rv_in_proj = rv_in_proj(input)

        reverse = pd_layers.DynamicLSTMLayer(
            4 * hidden_size,
            'lstm',
            is_reverse=True,
            param_attr=fluid.ParamAttr(name="reverse_lstm.w"),
            bias_attr=fluid.ParamAttr(name="reverse_lstm.b")).ops()
        reverse = reverse(rv_in_proj)

        return [forward, reverse]
Example #14
    def __init__(self, name_scope):
        super(G, self).__init__(name_scope)
        name_scope = self.full_name()
        # My_G code
        """
        Model flow: 2 fully connected layers + 1 upsample + 1 conv + 1 upsample + 1 conv.
        Note: except after the final convolution, every output goes through a normalization layer; the other layers use leaky_relu.
        """
        self.fc1 = Linear(input_dim=100, output_dim=1024)
        self.bn1 = fluid.dygraph.BatchNorm(num_channels=1024, act='relu')

        self.fc2 = Linear(input_dim=1024, output_dim=128 * 8 * 8)
        self.bn2 = fluid.dygraph.BatchNorm(num_channels=128 * 8 * 8,
                                           act='relu')

        self.conv1 = Conv2D(num_channels=128,
                            num_filters=64,
                            filter_size=5,
                            padding=2)
        self.bn3 = fluid.dygraph.BatchNorm(num_channels=64, act='relu')

        self.conv2 = Conv2D(num_channels=64,
                            num_filters=3,
                            filter_size=5,
                            padding=2,
                            act='tanh')
Example #15
File: bert.py  Project: zhong110020/models
    def __init__(self,
                 config,
                 return_pooled_out=True,
                 weight_sharing=True,
                 use_fp16=False):
        super(PretrainModelLayer, self).__init__()
        self.config = config
        self._voc_size = config['vocab_size']
        self._emb_size = config['hidden_size']
        self._hidden_act = config['hidden_act']
        self._prepostprocess_dropout = config['hidden_dropout_prob']

        self._word_emb_name = "word_embedding"
        self._param_initializer = fluid.initializer.TruncatedNormal(
            scale=config['initializer_range'])
        self._weight_sharing = weight_sharing
        self.use_fp16 = use_fp16
        self._dtype = "float16" if use_fp16 else "float32"

        self.bert_layer = BertModelLayer(
            config=self.config, return_pooled_out=True, use_fp16=self.use_fp16)

        self.pre_process_layer = PrePostProcessLayer(
            "n", self._emb_size, self._prepostprocess_dropout, "pre_encoder")

        self.pooled_fc = Linear(
            input_dim=self._emb_size,
            output_dim=self._emb_size,
            param_attr=fluid.ParamAttr(
                name="mask_lm_trans_fc.w_0",
                initializer=self._param_initializer),
            bias_attr="mask_lm_trans_fc.b_0",
            act="tanh")

        self.mask_lm_out_bias_attr = fluid.ParamAttr(
            name="mask_lm_out_fc.b_0",
            initializer=fluid.initializer.Constant(value=0.0))

        if not self._weight_sharing:
            self.out_fc = Linear(
                input_dim=self._emb_size,
                output_dim=self._voc_size,
                param_attr=fluid.ParamAttr(
                    name="mask_lm_out_fc.w_0",
                    initializer=self._param_initializer),
                bias_attr=self.mask_lm_out_bias_attr)
        else:
            self.fc_create_params = self.create_parameter(
                shape=[self._voc_size],
                dtype=self._dtype,
                attr=self.mask_lm_out_bias_attr,
                is_bias=True)

        self.next_sent_fc = Linear(
            input_dim=self._emb_size,
            output_dim=2,
            param_attr=fluid.ParamAttr(
                name="next_sent_fc.w_0", initializer=self._param_initializer),
            bias_attr="next_sent_fc.b_0")
Example #16
 def __init__(self, in_size, out_size):
     super(LayerSaved, self).__init__()
     self.hidden = 100
     self._linear_0 = Linear(in_size, self.hidden)
     self._linear_1_0 = Linear(self.hidden, self.hidden)
     self._linear_1_1 = Linear(self.hidden, self.hidden)
     self._linear_2 = Linear(self.hidden, out_size)
     self._scale = paddle.to_tensor(9.9)
Example #17
 def __init__(self):
     super(MyLeNet, self).__init__()
     self.c1 = Conv2D(3, 6, 5, 1)
     self.s2 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
     self.c3 = Conv2D(6, 16, 5, 1)
     self.s4 = Pool2D(pool_size=2, pool_type='max', pool_stride=2)
     self.c5 = Conv2D(16, 120, 5, 1)
     self.f6 = Linear(120, 84, act='relu')
     self.f7 = Linear(84, 10, act='softmax')
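A minimal forward sketch (an assumption, for a 3x32x32 input so that c5 yields a [N, 120, 1, 1] map) of the classic LeNet pipeline built from the layers above:

 # Hypothetical forward sketch, not from the original source.
 def forward(self, x):
     x = self.s2(self.c1(x))
     x = self.s4(self.c3(x))
     x = self.c5(x)                                  # [N, 120, 1, 1]
     x = fluid.layers.reshape(x, [x.shape[0], -1])   # [N, 120]
     x = self.f6(x)
     return self.f7(x)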
Example #18
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        model = [
            ReflectionPad2D(1),
            Spectralnorm(layer=Conv2D(input_nc,
                                      num_filters=ndf,
                                      filter_size=4,
                                      stride=2,
                                      bias_attr=True)),
            LeakyReLU(alpha=0.2)
        ]

        for i in range(1, n_layers - 2):
            mult = 2**(i - 1)
            model += [
                ReflectionPad2D(1),
                Spectralnorm(layer=Conv2D(ndf * mult,
                                          num_filters=ndf * mult * 2,
                                          filter_size=4,
                                          stride=2,
                                          bias_attr=True)),
                LeakyReLU(alpha=0.2)
            ]

        mult = 2**(n_layers - 2 - 1)
        model += [
            ReflectionPad2D(1),
            Spectralnorm(layer=Conv2D(ndf * mult,
                                      num_filters=ndf * mult * 2,
                                      filter_size=4,
                                      stride=1,
                                      bias_attr=True)),
            LeakyReLU(alpha=0.2)
        ]

        # Class Activation Map
        mult = 2**(n_layers - 2)
        self.gap_fc = Spectralnorm(layer=Linear(ndf *
                                                mult, 1, bias_attr=False))
        self.gmp_fc = Spectralnorm(layer=Linear(ndf *
                                                mult, 1, bias_attr=False))
        self.conv1x1 = Conv2D(ndf * mult * 2,
                              num_filters=ndf * mult,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.leaky_relu = LeakyReLU(alpha=0.2)

        self.pad = ReflectionPad2D(1)

        self.conv = Spectralnorm(layer=Conv2D(ndf * mult,
                                              num_filters=1,
                                              filter_size=4,
                                              stride=1,
                                              bias_attr=False))

        self.model = Sequential(*model)
Example #19
 def __init__(self, num_scales, each_scales_size, point_scales_list, k=40):
     super(PointcloudCls, self).__init__()
     self.latentfeature = Latentfeature(num_scales, each_scales_size, point_scales_list)
     self.fc1 = Linear(1920, 1024)
     self.fc2 = Linear(1024, 512)
     self.fc3 = Linear(512, 256)
     self.fc4 = Linear(256, k)
     # self.dropout = nn.Dropout(p=0.3)
     self.bn1 = BatchNorm(1024, act='relu')
     self.bn2 = BatchNorm(512, act='relu')
     self.bn3 = BatchNorm(256, act='relu')
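The forward pass is omitted from the snippet; a minimal sketch (an assumption) that runs the latent feature through the FC/BatchNorm stack and returns raw class scores:

 # Hypothetical forward sketch, not from the original source.
 def forward(self, x):
     x = self.latentfeature(x)   # 1920-d global feature
     x = self.bn1(self.fc1(x))   # 1920 -> 1024, relu via BatchNorm act
     x = self.bn2(self.fc2(x))   # 1024 -> 512
     x = self.bn3(self.fc3(x))   # 512 -> 256
     return self.fc4(x)          # 256 -> k raw scores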
Example #20
class Discriminator(dygraph.Layer):
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        model = [ReflectionPad2d([1,1,1,1]),
                # TODO: spectral normalization
                 Conv2D(input_nc, ndf, filter_size=4, stride=2, padding=0, bias_attr=True),
                 LeakyReLU(0.2, True)]

        for i in range(1, n_layers - 2):
            mult = 2 ** (i - 1)
            model += [ReflectionPad2d([1,1,1,1]),
                      Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=2, padding=0, bias_attr=True),
                      LeakyReLU(0.2, True)]

        mult = 2 ** (n_layers - 2 - 1)
        model += [ReflectionPad2d([1,1,1,1]),
                  Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=1, padding=0, bias_attr=True),
                  LeakyReLU(0.2, True)]

        # Class Activation Map
        mult = 2 ** (n_layers - 2)
        self.gap_fc = Linear(ndf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ndf * mult, 1, bias_attr=False)
        self.conv1x1 = Conv2D(ndf * mult * 2, ndf * mult, filter_size=1, stride=1, bias_attr=True)
        self.leaky_relu = LeakyReLU(0.2, True)

        self.pad = ReflectionPad2d([1,1,1,1])
        self.conv = Conv2D(ndf * mult, 1, filter_size=4, stride=1, padding=0, bias_attr=False)

        self.model = Sequential(*model)

    def forward(self, input):
        x = self.model(input)

        gap = layers.adaptive_pool2d(x, 1, pool_type='avg')
        gap_logit = self.gap_fc(layers.reshape(gap, [x.shape[0], -1]))
        gap_weight = list(self.gap_fc.parameters())[0]
        gap = x * layers.unsqueeze(layers.unsqueeze(gap_weight, 2), 3)

        gmp = layers.adaptive_pool2d(x, 1, pool_type='max')
        gmp_logit = self.gmp_fc(layers.reshape(gmp, [x.shape[0], -1]))
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp = x * layers.unsqueeze(layers.unsqueeze(gmp_weight, 2), 3)

        cam_logit = layers.concat([gap_logit, gmp_logit], 1)
        x = layers.concat([gap, gmp], 1)
        x = self.leaky_relu(self.conv1x1(x))

        heatmap = layers.reduce_sum(x, dim=1, keepdim=True)

        x = self.pad(x)
        out = self.conv(x)

        return out, cam_logit, heatmap
Example #21
    def __init__(self, in_size, out_size, load_path):
        super(LayerLoadFinetune, self).__init__()
        # Test duplicate name
        self._linear_0 = Linear(in_size, in_size)
        self._linear_1_0 = Linear(out_size, in_size)
        self._linear_1_1 = Linear(out_size, in_size)
        self._linear_2 = Linear(out_size, out_size)
        self._scale = paddle.to_tensor(9.9)

        # Load multiple times
        self._load_l1 = paddle.jit.load(load_path)
        self._load_l2 = paddle.jit.load(load_path)
Example #22
    def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
        super(CNNEnoder, self).__init__()
        model_name = "efficientnet-b0"
        override_params = {"num_classes": 1280}
        blocks_args, global_params = get_model_params(model_name, override_params=override_params)

        self.drop_p = drop_p
        self.BackBone = EfficientNet(blocks_args, global_params)
        self.fc1 = Linear(1280, fc_hidden1)
        self.bn1 = BatchNorm(fc_hidden1)
        self.fc2 = Linear(fc_hidden1, fc_hidden2)
        self.bn2 = BatchNorm(fc_hidden2)
        self.fc3 = Linear(fc_hidden2, CNN_embed_dim)
Example #23
 def __init__(self):
     super(MLP, self).__init__()
     self._user_latent = Linear(1000, 256)
     self._item_latent = Linear(100, 256)
     self._match_layers = []
     self._hid_sizes = [128, 64]
     for i in range(len(self._hid_sizes)):
         self._match_layers.append(
             self.add_sublayer(
                 'match_layer_%d' % i,
                 Linear(256 * 2 if i == 0 else self._hid_sizes[i - 1],
                        self._hid_sizes[i],
                        act='relu')))
Example #24
 def __init__(self, hidden_size, bias=False, init_scale=0.1):
     super(AttentionLayer, self).__init__()
     self.input_proj = Linear(
         hidden_size,
         hidden_size,
         param_attr=ParamAttr(initializer=UniformInitializer(
             low=-init_scale, high=init_scale)),
         bias_attr=bias)
     self.output_proj = Linear(
         hidden_size + hidden_size,
         hidden_size,
         param_attr=ParamAttr(initializer=UniformInitializer(
             low=-init_scale, high=init_scale)),
         bias_attr=bias)
Example #25
    def __init__(self, num_classes=1):
        super(CNN, self).__init__()

        self.conv1 = Conv2D(3, 64, 5, padding=2, stride=1, act='sigmoid')
        self.conv2 = Conv2D(64, 128, 5, padding=2, stride=1, act='sigmoid')
        self.conv3 = Conv2D(128, 256, 5, padding=2, stride=1, act='sigmoid')
        self.conv4 = Conv2D(256, 512, 5, padding=2, stride=1, act='sigmoid')
        self.conv5 = Conv2D(512, 1024, 5, padding=2, stride=1, act='sigmoid')
        self.conv6 = Conv2D(1024, 1024, 5, padding=2, stride=1, act='sigmoid')

        self.fc1 = Linear(1024 * 7 * 7, 1024, act='sigmoid')
        self.fc2 = Linear(1024, num_classes)

        self.pool_down = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
Example #26
 def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.):
     super(MultiHeadAttention, self).__init__()
     self.n_head = n_head
     self.d_key = d_key
     self.d_value = d_value
     self.d_model = d_model
     self.dropout_rate = dropout_rate
     self.q_fc = Linear(
         input_dim=d_model, output_dim=d_key * n_head, bias_attr=False)
     self.k_fc = Linear(
         input_dim=d_model, output_dim=d_key * n_head, bias_attr=False)
     self.v_fc = Linear(
         input_dim=d_model, output_dim=d_value * n_head, bias_attr=False)
     self.proj_fc = Linear(
         input_dim=d_value * n_head, output_dim=d_model, bias_attr=False)
Example #27
 def __init__(self, CNN_embed_dim=300, kernel_size=(3, 3),
              frame_length=20, h_RNN_layers=3,
              h_RNN=128, h_FC_dim=128, drop_p=0.3, num_classes=50):
     super(DecoderRNN, self).__init__()
     self.frame_length = frame_length
     self.RNN_input_size = CNN_embed_dim
     self.h_RNN_layers = h_RNN_layers  # RNN hidden layers
     self.h_RNN = h_RNN  # RNN hidden nodes
     self.h_FC_dim = h_FC_dim
     self.drop_p = drop_p
     self.num_classes = num_classes
     self.BLSTM = ConvBLSTM(CNN_embed_dim, self.h_RNN, kernel_size=kernel_size,
                            num_layers=h_RNN_layers)
     self.fc1 = Linear(self.h_RNN*self.frame_length, self.h_FC_dim)
     self.fc2 = Linear(self.h_FC_dim, self.num_classes)
Example #28
    def __init__(self, use_poster, use_mov_title, use_mov_cat, use_age_job):
        super(MovModel, self).__init__()

        # Attach the passed-in name info and boolean flags to the model class
        self.use_mov_poster = use_poster
        self.use_mov_title = use_mov_title
        self.use_usr_age_job = use_age_job
        self.use_mov_cat = use_mov_cat

        # Get dataset info and build data iterators for the training and validation sets
        Dataset = MovieLen(self.use_mov_poster)
        self.Dataset = Dataset
        self.trainset = self.Dataset.train_dataset
        self.valset = self.Dataset.valid_dataset
        self.train_loader = self.Dataset.load_data(dataset=self.trainset,
                                                   mode='train')
        self.valid_loader = self.Dataset.load_data(dataset=self.valset,
                                                   mode='valid')
        """ define network layer for embedding usr info """
        # 对电影ID信息做映射,并紧接着一个Linear层
        MOV_DICT_SIZE = Dataset.max_mov_id + 1
        self.mov_emb = Embedding([MOV_DICT_SIZE, 32])
        self.mov_fc = Linear(32, 32)

        # Embed the movie category
        CATEGORY_DICT_SIZE = len(Dataset.movie_cat) + 1
        self.mov_cat_emb = Embedding([CATEGORY_DICT_SIZE, 32], is_sparse=False)
        self.mov_cat_fc = Linear(32, 32)

        # Embed the movie title
        MOV_TITLE_DICT_SIZE = len(Dataset.movie_title) + 1
        self.mov_title_emb = Embedding([MOV_TITLE_DICT_SIZE, 32],
                                       is_sparse=False)
        self.mov_title_conv = Conv2D(1,
                                     1,
                                     filter_size=(3, 1),
                                     stride=(2, 1),
                                     padding=0,
                                     act='relu')
        self.mov_title_conv2 = Conv2D(1,
                                      1,
                                      filter_size=(3, 1),
                                      stride=1,
                                      padding=0,
                                      act='relu')

        # A new Linear layer that fuses the movie features
        self.mov_concat_embed = Linear(96, 200, act='tanh')
Example #29
    def __init__(self, trg_vocab_size, max_length, n_layer, n_head, d_key,
                 d_value, d_model, d_inner_hid, prepostprocess_dropout,
                 attention_dropout, relu_dropout, preprocess_cmd,
                 postprocess_cmd, share_input_output_embed, word_embedder):
        super(WrapDecoder, self).__init__()

        self.emb_dropout = prepostprocess_dropout
        self.emb_dim = d_model
        self.word_embedder = word_embedder
        self.pos_encoder = Embedding(
            size=[max_length, self.emb_dim],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    position_encoding_init(max_length, self.emb_dim)),
                trainable=False))

        self.decoder = Decoder(n_layer, n_head, d_key, d_value, d_model,
                               d_inner_hid, prepostprocess_dropout,
                               attention_dropout, relu_dropout, preprocess_cmd,
                               postprocess_cmd)

        if share_input_output_embed:
            self.linear = lambda x: layers.matmul(x=x,
                                                  y=self.word_embedder.
                                                  word_embedder.weight,
                                                  transpose_y=True)
        else:
            self.linear = Linear(
                input_dim=d_model, output_dim=trg_vocab_size, bias_attr=False)
Example #30
    def __init__(self, layers=50, class_dim=1):
        super(ResNet, self).__init__()
        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, "supported layers are {} but input layer is {}".format(supported_layers,
                                                                                                  layers)
        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]
        self.conv = ConvBNLayer(num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
        self.pool2d_max = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
        self.bottleneck_block_list = []
        num_channels = 64
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(num_channels=num_channels,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True
        self.pool2d_avg = Pool2D(pool_size=7, pool_type='avg', global_pooling=True)

        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.out = Linear(input_dim=2048, output_dim=class_dim,
                          param_attr=fluid.param_attr.ParamAttr(initializer=fluid.initializer.Uniform(-stdv, stdv)))
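The snippet ends with the layer definitions; below is a minimal forward sketch (an assumption) of the standard ResNet flow implied by them.

    # Hypothetical forward sketch, not from the original source.
    def forward(self, inputs):
        y = self.conv(inputs)
        y = self.pool2d_max(y)
        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = fluid.layers.reshape(y, [y.shape[0], -1])   # [N, 2048]
        return self.out(y)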