Example #1
 def no_detach_single(self):
     data = self.generate_Data()
     with fluid.dygraph.guard():
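         # two stacked FC layers (sizes 10 and 1) with constant-initialized weights and biases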
         fc_w_param_attrs = fluid.ParamAttr(
             initializer=fluid.initializer.Constant(5.0))
         fc_b_param_attrs = fluid.ParamAttr(
             initializer=fluid.initializer.Constant(6.0))
         fc = FC("fc",
                 10,
                 num_flatten_dims=1,
                 param_attr=fc_w_param_attrs,
                 bias_attr=fc_b_param_attrs)
         fc1_w_param_attrs = fluid.ParamAttr(
             initializer=fluid.initializer.Constant(7.0))
         fc1_b_param_attrs = fluid.ParamAttr(
             initializer=fluid.initializer.Constant(8.0))
         fc1 = FC("fc",
                  1,
                  num_flatten_dims=1,
                  param_attr=fc1_w_param_attrs,
                  bias_attr=fc1_b_param_attrs)
         data = to_variable(data)
         x = fc(data)
         x1 = fc1(x)
         loss = x1
         # print(loss, loss.shape)
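         # backpropagate and return the gradient of the intermediate output x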
         loss.backward()
         return x.gradient()
Example #2
 def __init__(self,
              name_scope,
              d_key,
              d_value,
              d_model,
              n_head=1,
              dropout_rate=0.,
              cache=None,
              gather_idx=None,
              static_kv=False):
     super(MultiHeadAttentionLayer, self).__init__(name_scope)
     self._n_head = n_head
     self._d_key = d_key
     self._d_value = d_value
     self._d_model = d_model
     self._dropout_rate = dropout_rate
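     # per-head linear projections for queries, keys and values, plus the final output projection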
     self._q_fc = FC(name_scope=self.full_name(),
                     size=d_key * n_head,
                     bias_attr=False,
                     num_flatten_dims=2)
     self._k_fc = FC(name_scope=self.full_name(),
                     size=d_key * n_head,
                     bias_attr=False,
                     num_flatten_dims=2)
     self._v_fc = FC(name_scope=self.full_name(),
                     size=d_value * n_head,
                     bias_attr=False,
                     num_flatten_dims=2)
     self._proj_fc = FC(name_scope=self.full_name(),
                        size=self._d_model,
                        bias_attr=False,
                        num_flatten_dims=2)
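Only the constructor is shown above. Below is a minimal sketch of how these four FC projections could be combined into scaled dot-product attention; the forward signature, shapes, and fluid.layers calls are assumptions for illustration, not part of the original example.
 def forward(self, queries, keys, values, attn_bias=None):
     # sketch only: project inputs to [batch, seq_len, n_head * d_key] (d_value for values)
     q = self._q_fc(queries)
     k = self._k_fc(keys)
     v = self._v_fc(values)
     # split heads: [batch, n_head, seq_len, d_key]
     q = fluid.layers.transpose(
         fluid.layers.reshape(q, shape=[0, 0, self._n_head, self._d_key]),
         perm=[0, 2, 1, 3])
     k = fluid.layers.transpose(
         fluid.layers.reshape(k, shape=[0, 0, self._n_head, self._d_key]),
         perm=[0, 2, 1, 3])
     v = fluid.layers.transpose(
         fluid.layers.reshape(v, shape=[0, 0, self._n_head, self._d_value]),
         perm=[0, 2, 1, 3])
     # scaled dot-product attention
     product = fluid.layers.matmul(
         q, k, transpose_y=True, alpha=self._d_key**-0.5)
     if attn_bias is not None:
         product += attn_bias
     weights = fluid.layers.softmax(product)
     if self._dropout_rate:
         weights = fluid.layers.dropout(
             weights, dropout_prob=self._dropout_rate)
     out = fluid.layers.matmul(weights, v)
     # merge heads back to [batch, seq_len, n_head * d_value] and project to d_model
     out = fluid.layers.reshape(
         fluid.layers.transpose(out, perm=[0, 2, 1, 3]),
         shape=[0, 0, self._n_head * self._d_value])
     return self._proj_fc(out)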
Example #3
 def __init__(self, name_scope, d_inner_hid, d_hid, dropout_rate):
     super(PositionwiseFeedForwardLayer, self).__init__(name_scope)
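     # inner FC expands to d_inner_hid with ReLU; the second FC projects back to d_hid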
     self._i2h = FC(name_scope=self.full_name(),
                    size=d_inner_hid,
                    num_flatten_dims=2,
                    act="relu")
     self._h2o = FC(name_scope=self.full_name(),
                    size=d_hid,
                    num_flatten_dims=2)
     self._dropout_rate = dropout_rate
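A possible forward pass for this layer (assumed for illustration; the original example only shows the constructor):
 def forward(self, x):
     hidden = self._i2h(x)  # expand to d_inner_hid with ReLU
     if self._dropout_rate:
         hidden = fluid.layers.dropout(
             hidden, dropout_prob=self._dropout_rate)
     return self._h2o(hidden)  # project back to d_hid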
Example #4
 def __init__(self, name_scope):
     super(MLP, self).__init__(name_scope)
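     # two small FC layers, all weights and biases initialized to the constant 0.1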
     self._fc1 = FC(self.full_name(),
                    3,
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Constant(value=0.1)),
                    bias_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Constant(value=0.1)))
     self._fc2 = FC(self.full_name(),
                    4,
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Constant(value=0.1)),
                    bias_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Constant(value=0.1)))
Example #5
    def __init__(self, name_scope):
        super(MNIST, self).__init__(name_scope)

        self._simple_img_conv_pool_1 = SimpleImgConvPool(self.full_name(),
                                                         20,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        self._simple_img_conv_pool_2 = SimpleImgConvPool(self.full_name(),
                                                         50,
                                                         5,
                                                         2,
                                                         2,
                                                         act="relu")

        pool_2_shape = 50 * 4 * 4
        SIZE = 10
        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
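        # final softmax FC classifier over the flattened pooled features,
        # initialized from a normal distribution with the scale computed above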
        self._fc = FC(self.full_name(),
                      10,
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.NormalInitializer(
                              loc=0.0, scale=scale)),
                      act="softmax")
Example #6
    def __init__(self, name_scope, layers=50, class_dim=102):
        super(ResNet, self).__init__(name_scope)

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(self.full_name(),
                                num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu')
        self.pool2d_max = Pool2D(self.full_name(),
                                 pool_size=3,
                                 pool_stride=2,
                                 pool_padding=1,
                                 pool_type='max')

        self.bottleneck_block_list = []
        num_channels = 64
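        # stack the bottleneck blocks stage by stage; the first block of every
        # stage after the first downsamples with stride 2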
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(self.full_name(),
                                    num_channels=num_channels,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = Pool2D(self.full_name(),
                                 pool_size=7,
                                 pool_type='avg',
                                 global_pooling=True)

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = FC(self.full_name(),
                      size=class_dim,
                      act='softmax',
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.Uniform(-stdv, stdv)))
Example #7
    def __init__(self,
                 name_scope,
                 trg_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd,
                 postprocess_cmd,
                 weight_sharing,
                 caches=None,
                 gather_idx=None,
                 is_sparse=False):
        """
        The wrapper assembles together all needed layers for the decoder.
        """
        super(WrapDecoderLayer, self).__init__(name_scope)

        self._prepare_decoder_layer = PrepareEncoderDecoderLayer(
            self.full_name(),
            trg_vocab_size,
            d_model,
            max_length,
            prepostprocess_dropout,
            is_sparse=is_sparse,
            word_emb_param_name=word_emb_param_names[1],
            pos_enc_param_name=pos_enc_param_names[1])
        self._decoder_layer = DecoderLayer(self.full_name(),
                                           n_layer,
                                           n_head,
                                           d_key,
                                           d_value,
                                           d_model,
                                           d_inner_hid,
                                           prepostprocess_dropout,
                                           attention_dropout,
                                           relu_dropout,
                                           preprocess_cmd,
                                           postprocess_cmd,
                                           caches=caches,
                                           gather_idx=gather_idx)
        self._weight_sharing = weight_sharing
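        # without weight sharing, a separate FC projects the decoder output to the target vocabulary size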
        if not weight_sharing:
            self._fc = FC(self.full_name(),
                          size=trg_vocab_size,
                          bias_attr=False)
 def __init__(self, name_scope):
     super(Generator, self).__init__(name_scope)
     self._fc1 = FC(self.full_name(), size=64, act='elu')
     self._fc2 = FC(self.full_name(), size=64, act='elu')
     self._fc3 = FC(self.full_name(), size=1)
 def __init__(self, name_scope):
     super(Discriminator, self).__init__(name_scope)
     self._fc1 = FC(self.full_name(), size=32, act='elu')
     self._fc2 = FC(self.full_name(), size=1)