def __init__(self):
    super(VGGNet, self).__init__()
    # Five conv-pool stages of a VGG-16 style backbone; `groups` is the number of
    # stacked 3x3 convolutions in each stage.
    self.VGG16_1 = ConvPool(
        num_channels=3, num_filters=64, filter_size=3, pool_size=2, pool_stride=2,
        groups=2, pool_padding=0, pool_type='max', conv_stride=1, conv_padding=0, act='relu')
    self.VGG16_2 = ConvPool(
        num_channels=64, num_filters=128, filter_size=3, pool_size=2, pool_stride=2,
        groups=2, pool_padding=0, pool_type='max', conv_stride=1, conv_padding=0, act='relu')
    self.VGG16_3 = ConvPool(
        num_channels=128, num_filters=256, filter_size=3, pool_size=2, pool_stride=2,
        groups=3, pool_padding=0, pool_type='max', conv_stride=1, conv_padding=0, act='relu')
    self.VGG16_4 = ConvPool(
        num_channels=256, num_filters=512, filter_size=3, pool_size=2, pool_stride=2,
        groups=3, pool_padding=0, pool_type='max', conv_stride=1, conv_padding=0, act='relu')
    self.VGG16_5 = ConvPool(
        num_channels=512, num_filters=512, filter_size=3, pool_size=2, pool_stride=2,
        groups=3, pool_padding=0, pool_type='max', conv_stride=1, conv_padding=0, act='relu')
    # Classifier head: two hidden fully connected layers and a 2-class softmax output.
    self.linear1 = Linear(input_dim=12800, output_dim=4096, act='relu')
    self.linear2 = Linear(input_dim=4096, output_dim=4096, act='relu')
    self.linear3 = Linear(input_dim=4096, output_dim=2, act='softmax')
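The input_dim of linear1 encodes an assumption about the feature map produced by the five stages above; a quick sanity check (the 5x5 spatial size is inferred here, not stated in the original code):

# If forward() flattens the 512-channel output of VGG16_5, 12800 features correspond
# to 25 spatial positions, e.g. a 5x5 map. The exact input resolution that yields this
# is an assumption about the original setup.
channels, spatial = 512, 5 * 5
assert channels * spatial == 12800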
Example #2
def __init__(self, input_size):
    super(MLP, self).__init__()
    # Two small fully connected layers; weights and biases are constant-initialized to 0.1.
    self._linear1 = Linear(
        input_size,
        3,
        param_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.1)),
        bias_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.1)))
    self._linear2 = Linear(
        3,
        4,
        param_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.1)),
        bias_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.1)))
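A minimal usage sketch, assuming PaddlePaddle 1.x dygraph and that the MLP class defines a forward() that chains _linear1 and _linear2 (the forward method is not part of the snippet above):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    mlp = MLP(input_size=10)   # input_size chosen arbitrarily for illustration
    x = fluid.dygraph.to_variable(np.random.rand(4, 10).astype('float32'))
    out = mlp(x)               # [4, 3] after _linear1, then [4, 4] after _linear2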
Example #3
    def __init__(self, layers=50, class_dim=102, use_cudnn=False):
        super(ResNet, self).__init__()

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_channels = [64, 256, 512, 1024]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(num_channels=3,
                                num_filters=64,
                                filter_size=7,
                                stride=2,
                                act='relu',
                                use_cudnn=use_cudnn)
        self.pool2d_max = Pool2D(pool_size=3,
                                 pool_stride=2,
                                 pool_padding=1,
                                 pool_type='max')

        self.bottleneck_block_list = []
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(num_channels=num_channels[block]
                                    if i == 0 else num_filters[block] * 4,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    shortcut=shortcut,
                                    use_cudnn=use_cudnn))
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = Pool2D(pool_size=7,
                                 pool_type='avg',
                                 global_pooling=True)

        self.pool2d_avg_output = num_filters[-1] * 4 * 1 * 1

        import math
        # Fan-in based bound for the classifier weights; 2048 = num_filters[-1] * 4.
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = Linear(
            self.pool2d_avg_output,
            class_dim,
            act='softmax',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
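For orientation, the stage layout implied by the lists above for layers=50 (a quick check, not part of the original class):

depth = [3, 4, 6, 3]                          # bottleneck blocks per stage (ResNet-50)
num_filters = [64, 128, 256, 512]
out_channels = [f * 4 for f in num_filters]   # each BottleneckBlock expands its width by 4
print(sum(depth), out_channels[-1])           # 16 blocks in total, final feature width 2048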
Example #4
def __init__(self,
             d_key,
             d_value,
             d_model,
             n_head=1,
             dropout_rate=0.,
             cache=None,
             gather_idx=None,
             static_kv=False):
    super(MultiHeadAttentionLayer, self).__init__()
    self._n_head = n_head
    self._d_key = d_key
    self._d_value = d_value
    self._d_model = d_model
    self._dropout_rate = dropout_rate
    # Linear projections for queries, keys and values (each producing n_head heads),
    # plus the output projection back to d_model; none of them uses a bias.
    self._q_fc = Linear(self._d_model, d_key * n_head, bias_attr=False)
    self._k_fc = Linear(self._d_model, d_key * n_head, bias_attr=False)
    self._v_fc = Linear(self._d_model, d_value * n_head, bias_attr=False)
    self._proj_fc = Linear(d_value * n_head, self._d_model, bias_attr=False)
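The projection widths follow the usual multi-head pattern, d_model -> n_head * d_key for queries/keys and n_head * d_value for values; a quick numeric illustration with assumed values (not taken from the original code):

d_model, d_key, d_value, n_head = 512, 64, 64, 8   # assumed example values
# _q_fc/_k_fc map [batch, seq, d_model] -> [batch, seq, d_key * n_head]; the result is
# usually reshaped to [batch, n_head, seq, d_key] before scaled dot-product attention.
assert d_key * n_head == d_model and d_value * n_head == d_model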
Example #5
    def __init__(self, num_classes=9):
        super(AlexNet, self).__init__()

        self.conv1 = ConvBNLayer(num_channels=2, num_filters=96, filter_size=11, stride=4, padding=5, act='leaky_relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = ConvBNLayer(num_channels=96, num_filters=256, filter_size=5, stride=1, padding=2, act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv3 = ConvBNLayer(num_channels=256, num_filters=384, filter_size=3, stride=1, padding=1, act='leaky_relu')
        self.conv4 = ConvBNLayer(num_channels=384, num_filters=384, filter_size=3, stride=1, padding=1, act='leaky_relu')
        self.conv5 = ConvBNLayer(num_channels=384, num_filters=256, filter_size=3, stride=1, padding=1, act='leaky_relu')
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')

        self.fc1 = Linear(input_dim=256 * 3, output_dim=256 * 2, act='relu')
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=256 * 2, output_dim=256 * 1, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=256, output_dim=num_classes)

        self.conv_layers = [self.conv1, self.pool1, self.conv2, self.pool2, self.conv3, self.conv4, self.conv5,
                            self.pool5]
Example #6
    def __init__(self,
                 trg_vocab_size,
                 max_length,
                 n_layer,
                 n_head,
                 d_key,
                 d_value,
                 d_model,
                 d_inner_hid,
                 prepostprocess_dropout,
                 attention_dropout,
                 relu_dropout,
                 preprocess_cmd,
                 postprocess_cmd,
                 weight_sharing,
                 caches=None,
                 gather_idx=None,
                 is_sparse=False):
        """
        The wrapper assembles together all needed layers for the encoder.
        """
        super(WrapDecoderLayer, self).__init__()

        self._prepare_decoder_layer = PrepareEncoderDecoderLayer(
            trg_vocab_size,
            d_model,
            max_length,
            prepostprocess_dropout,
            is_sparse=is_sparse,
            word_emb_param_name=word_emb_param_names[1],
            pos_enc_param_name=pos_enc_param_names[1])
        self._decoder_layer = DecoderLayer(
            n_layer,
            n_head,
            d_key,
            d_value,
            d_model,
            d_inner_hid,
            prepostprocess_dropout,
            attention_dropout,
            relu_dropout,
            preprocess_cmd,
            postprocess_cmd,
            caches=caches,
            gather_idx=gather_idx)
        self._weight_sharing = weight_sharing
        if not weight_sharing:
            self._fc = Linear(d_model, trg_vocab_size, bias_attr=False)
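When weight_sharing is set, no separate output projection is created; in transformer implementations of this kind the decoder output is instead projected onto the vocabulary by reusing the target word-embedding matrix in forward(). A rough sketch of that shape relationship (the function and argument names here are hypothetical, not from the snippet):

import numpy as np

def project_to_vocab(dec_output, word_emb):
    # dec_output: [batch, trg_len, d_model], word_emb: [trg_vocab_size, d_model]
    return np.matmul(dec_output, word_emb.T)   # -> [batch, trg_len, trg_vocab_size]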
Example #7
def __init__(self):
    super(Generator, self).__init__()
    # Three fully connected layers mapping a 2-d input to a single output,
    # with ELU activations on the hidden layers.
    self._fc1 = Linear(2, 64, act='elu')
    self._fc2 = Linear(64, 64, act='elu')
    self._fc3 = Linear(64, 1)
Example #8
def __init__(self):
    super(Discriminator, self).__init__()
    # A single ELU hidden layer followed by a scalar output.
    self._fc1 = Linear(1, 32, act='elu')
    self._fc2 = Linear(32, 1)
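A minimal sketch of wiring the two networks from Examples #7 and #8 together, assuming each class defines a forward() that chains its Linear layers (training loop, losses and optimizers omitted):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    g, d = Generator(), Discriminator()
    z = fluid.dygraph.to_variable(np.random.rand(16, 2).astype('float32'))
    fake = g(z)        # [16, 1] generated samples
    score = d(fake)    # [16, 1] discriminator outputs for the generated batch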
Example #9
def __init__(self, d_inner_hid, d_hid, dropout_rate):
    super(PositionwiseFeedForwardLayer, self).__init__()
    # Expand to the inner dimension with ReLU, then project back to d_hid.
    self._i2h = Linear(d_hid, d_inner_hid, act="relu")
    self._h2o = Linear(d_inner_hid, d_hid)
    self._dropout_rate = dropout_rate
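The forward pass is not part of this snippet; a position-wise feed-forward layer of this shape is typically applied as in the hedged sketch below (dropout only when dropout_rate is non-zero):

def forward(self, x):
    hidden = self._i2h(x)        # d_hid -> d_inner_hid, ReLU applied by the Linear layer
    if self._dropout_rate:
        hidden = fluid.layers.dropout(hidden, dropout_prob=self._dropout_rate)
    return self._h2o(hidden)     # d_inner_hid -> d_hid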