Example #1
def StaticLenet(data, num_classes=10, classifier_activation='softmax'):
    conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
    conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
    fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
    fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
    fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
    conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
    conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
    fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
    fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
    fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
    conv1 = fluid.layers.conv2d(data,
                                num_filters=6,
                                filter_size=3,
                                stride=1,
                                padding=1,
                                param_attr=conv2d_w1_attr,
                                bias_attr=conv2d_b1_attr)
    batch_norm1 = layers.batch_norm(conv1)
    relu1 = layers.relu(batch_norm1)
    pool1 = fluid.layers.pool2d(relu1,
                                pool_size=2,
                                pool_type='max',
                                pool_stride=2)
    conv2 = fluid.layers.conv2d(pool1,
                                num_filters=16,
                                filter_size=5,
                                stride=1,
                                padding=0,
                                param_attr=conv2d_w2_attr,
                                bias_attr=conv2d_b2_attr)
    batch_norm2 = layers.batch_norm(conv2)
    relu6_1 = layers.relu6(batch_norm2)
    pool2 = fluid.layers.pool2d(relu6_1,
                                pool_size=2,
                                pool_type='max',
                                pool_stride=2)

    fc1 = fluid.layers.fc(input=pool2,
                          size=120,
                          param_attr=fc_w1_attr,
                          bias_attr=fc_b1_attr)
    leaky_relu1 = layers.leaky_relu(fc1, alpha=0.01)
    fc2 = fluid.layers.fc(input=leaky_relu1,
                          size=84,
                          param_attr=fc_w2_attr,
                          bias_attr=fc_b2_attr)
    sigmoid1 = layers.sigmoid(fc2)
    fc3 = fluid.layers.fc(input=sigmoid1,
                          size=num_classes,
                          param_attr=fc_w3_attr,
                          bias_attr=fc_b3_attr)
    softmax1 = layers.softmax(fc3, use_cudnn=True)
    return softmax1
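The function above only builds the forward graph. Below is a minimal usage sketch, assuming the paddle.fluid 1.x static-graph API and MNIST-sized 1x28x28 inputs; the `image`/`label` feed names and the Adam optimizer are illustrative choices, not part of the example.

import paddle.fluid as fluid

image = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

prediction = StaticLenet(image, num_classes=10)              # softmax probabilities
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
fluid.optimizer.Adam(learning_rate=1e-3).minimize(avg_loss)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())                     # initializes BN scale/bias and moving stats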
Example #2
 def mlp(self, features, name):
     h = features
     dim = features.shape[-1]
     dim_list = [dim * 2, dim]
     for i in range(2):
         h = L.fc(h,
                  size=dim_list[i],
                  name="%s_fc_%s" % (name, i),
                  act=None)
         if self.args.norm_type == "layer_norm":
             log.info("norm_type is %s" % self.args.norm_type)
             h = L.layer_norm(
                 h,
                 begin_norm_axis=1,
                 param_attr=F.ParamAttr(
                     name="norm_scale_%s_%s" % (name, i),
                     initializer=F.initializer.Constant(1.0)),
                 bias_attr=F.ParamAttr(
                     name="norm_bias_%s_%s" % (name, i),
                     initializer=F.initializer.Constant(0.0)),
             )
         else:
             log.info("using batch_norm")
             h = L.batch_norm(h)
         h = pgl.layers.graph_norm(self.graph_wrapper, h)
         h = L.relu(h)
     return h
Example #3
    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride,
                      padding,
                      name=None):
        """Create conv+bn layer"""
        conv = FL.conv2d(input=input,
                         num_filters=num_filters,
                         filter_size=filter_size,
                         stride=stride,
                         padding=padding,
                         groups=1,
                         act=None,
                         param_attr=ParamAttr(name=name + "_weights"),
                         bias_attr=False,
                         name=name + '.conv2d.output.1')

        bn_name = name + ".bn"
        return FL.batch_norm(
            input=conv,
            act=None,
            name=bn_name + '.output.1',
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance',
        )
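A hedged usage sketch of the helper above, stacking two conv+bn blocks; the surrounding class, the FL alias for paddle.fluid.layers, and the layer names are illustrative assumptions.

    def _stem(self, image):
        """Illustrative stem built from conv_bn_layer (hypothetical names)."""
        x = self.conv_bn_layer(image, num_filters=32, filter_size=3,
                               stride=2, padding=1, name="stem_conv1")
        x = FL.relu(x)
        x = self.conv_bn_layer(x, num_filters=64, filter_size=3,
                               stride=1, padding=1, name="stem_conv2")
        return FL.relu(x)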
Example #4
    def forward(self):
        """forward"""
        features_list = [self.gw.node_feat["attr"]]

        for i in range(self.num_layers):
            h = gin(self.gw,
                    features_list[i],
                    hidden_size=self.hidden_size,
                    activation="relu",
                    name="gin_%s" % (i),
                    init_eps=0.0,
                    train_eps=self.train_eps)

            h = fl.batch_norm(h)
            h = fl.relu(h)

            features_list.append(h)

        output = 0
        for i, h in enumerate(features_list):
            pooled_h = pgl.layers.graph_pooling(self.gw, h, self.pool_type)
            drop_h = fl.dropout(pooled_h,
                                self.dropout_prob,
                                dropout_implementation="upscale_in_train")
            output += fl.fc(drop_h,
                            size=self.num_class,
                            act=None,
                            param_attr=fluid.ParamAttr(name="final_fc_%s" %
                                                       (i)))

        # calculate loss
        self.loss = fl.softmax_with_cross_entropy(output, self.labels)
        self.loss = fl.reduce_mean(self.loss)
        self.acc = fl.accuracy(fl.softmax(output), self.labels)
Example #5
def conv2d_unit(x, filters, kernels, stride, padding, name, is_test,
                trainable):
    x = P.conv2d(input=x,
                 num_filters=filters,
                 filter_size=kernels,
                 stride=stride,
                 padding=padding,
                 act=None,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(
                     0.0, 0.01),
                                      name=name + ".conv.weights",
                                      trainable=trainable),
                 bias_attr=False)
    bn_name = name + ".bn"
    x = P.batch_norm(
        input=x,
        act=None,
        is_test=is_test,
        param_attr=ParamAttr(initializer=fluid.initializer.Constant(1.0),
                             regularizer=L2Decay(0.),
                             trainable=trainable,
                             name=bn_name + '.scale'),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            regularizer=L2Decay(0.),
                            trainable=trainable,
                            name=bn_name + '.offset'),
        moving_mean_name=bn_name + '.mean',
        moving_variance_name=bn_name + '.var')
    x = P.leaky_relu(x, alpha=0.1)
    return x
Example #6
 def forward(self, x):
     N, C, H, W = x.shape
     # fold batch and channel dims so batch_norm computes per-(sample, channel)
     # statistics, i.e. it behaves like instance normalization
     x_r = L.reshape(x, (1, N * C, H, W))
     y_r = L.batch_norm(x_r,
                        epsilon=self.eps,
                        do_model_average_for_mean_and_var=False,
                        use_global_stats=False)
     return L.reshape(y_r, (N, C, H, W))
Example #7
 def mlp(self, feat):
     for i in range(3):
         feat = L.fc(feat,
                     size=self.hidden_size,
                     name="simple_mlp_{}".format(i))
         feat = L.batch_norm(feat)
         feat = L.relu(feat)
         feat = L.dropout(feat, dropout_prob=0.5)
     return feat
Example #8
def _DBL(input, num_filters, filter_size, padding=1, name=None):
    conv = pfl.conv2d(input=input,
                      num_filters=num_filters,
                      filter_size=filter_size,
                      padding=padding,
                      name=(name + '_conv2d') if name else None)
    bn = pfl.batch_norm(input=conv, name=(name + '_bn') if name else None)
    act = pfl.leaky_relu(bn, name=(name + '_act') if name else None)
    return act
Example #9
    def test_batch_norm(self):
        program = Program()
        with program_guard(program):
            data = layers.data(name='data',
                               shape=[32, 128, 128],
                               dtype="float32")
            out = layers.batch_norm(data)

        print(str(program))
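A sketch of actually executing such a program, assuming the paddle.fluid 1.x Executor API; the random feed below is only illustrative.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard, layers

startup = Program()
main = Program()
with program_guard(main, startup):
    data = layers.data(name='data', shape=[32, 128, 128], dtype='float32')
    out = layers.batch_norm(data)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)                                            # initialize BN parameters and stats
x = np.random.random((4, 32, 128, 128)).astype('float32')  # batch of 4 samples
y, = exe.run(main, feed={'data': x}, fetch_list=[out])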
Example #10
 def forward(self, x):
     if self.training:
         N, C, H, W = x.shape
         x_r = L.reshape(x, (1, N*C, H, W))
         # for numeric stability
         y_r = L.batch_norm(x_r, epsilon=self.eps, do_model_average_for_mean_and_var=False, use_global_stats=False)
         return L.reshape(y_r, (N, C, H, W))
     else:
         _mean = L.reduce_mean(x, dim=[2, 3], keep_dim=True)
         _var = var(x, dim=[2, 3], unbiased=False, keepdim=True)
         y = (x - _mean) / L.sqrt(_var + self.eps)
         return y
Example #11
    def forward(self, gw):
        x = self._atom_encoder(gw)
        patch_repr = []
        for i in range(self.num_layers):
            e = self._bond_encoder(gw, name='l%d'%i)
            x = gin_layer(gw, x, e, 'gin_%s' % i)
            x = L.batch_norm(
                x, param_attr=F.ParamAttr(name='batchnorm_%s' % i))
            patch_repr.append(x)  # $h_i^{(k)}$

        patch_summary = L.concat(patch_repr, axis=1)  # $h_{\phi}^i$
        patch_pool = [pgl.layers.graph_pooling(gw, x, 'sum')
                      for x in patch_repr]
        global_repr = L.concat(patch_pool, axis=1)
        return global_repr, patch_summary
Example #12
 def __call__(self, input, is_test=False):
     return layers.batch_norm(
         input=input,
         act=act,
         is_test=is_test,
         momentum=momentum,
         epsilon=epsilon,
         param_attr=self.attr_holder.param_attr,
         bias_attr=self.attr_holder.bias_attr,
         data_layout=data_layout,
         in_place=in_place,
         name=name,
         moving_mean_name=self.attr_holder.moving_mean_attr.name,
         moving_variance_name=self.attr_holder.moving_variance_attr.name,
         do_model_average_for_mean_and_var=do_model_average_for_mean_and_var,
         use_global_stats=use_global_stats)
Example #13
 def conv_bn_layer(self,
                   input,
                   num_filters,
                   filter_size,
                   stride=1,
                   groups=1,
                   act=None,
                   name=None):
     conv = layers.conv2d(input=input,
                          num_filters=num_filters,
                          filter_size=filter_size,
                          stride=stride,
                          padding=(filter_size - 1) // 2,
                          groups=groups,
                          act=None,
                          param_attr=ParamAttr(name=name + "_weights"),
                          bias_attr=False)
     bn_name = "bn_" + name
     return layers.batch_norm(input=conv,
                              act=act,
                              param_attr=ParamAttr(name=bn_name + '_scale'),
                              bias_attr=ParamAttr(bn_name + '_offset'),
                              moving_mean_name=bn_name + '_mean',
                              moving_variance_name=bn_name + '_variance')
Example #14
def identity_block(input_tensor,
                   filters,
                   pre_name,
                   is_test,
                   trainable,
                   use_dcn=False):
    filters1, filters2, filters3 = filters

    x = P.conv2d(input=input_tensor,
                 num_filters=filters1,
                 filter_size=1,
                 stride=1,
                 padding=0,
                 act=None,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(
                     0.0, 0.01),
                                      name=pre_name + ".conv1.weight",
                                      trainable=trainable),
                 bias_attr=False)
    x = P.batch_norm(
        input=x,
        act=None,
        is_test=is_test,
        param_attr=ParamAttr(initializer=fluid.initializer.Constant(1.0),
                             regularizer=L2Decay(0.),
                             trainable=trainable,
                             name=pre_name + '.bn1.weight'),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            regularizer=L2Decay(0.),
                            trainable=trainable,
                            name=pre_name + '.bn1.bias'),
        moving_mean_name=pre_name + '.bn1.running_mean',
        moving_variance_name=pre_name + '.bn1.running_var')
    x = P.relu(x)

    if use_dcn:
        offset_mask = P.conv2d(
            input=x,
            num_filters=27,
            filter_size=3,
            stride=1,
            padding=1,
            act=None,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Normal(0.0, 0.01),
                name=pre_name + ".conv2.conv_offset_mask.weight",
                trainable=trainable),
            bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                                name=pre_name + ".conv2.conv_offset_mask.bias",
                                trainable=trainable))
        offset = offset_mask[:, :18, :, :]
        mask = offset_mask[:, 18:, :, :]
        mask = P.sigmoid(mask)
        x = P.deformable_conv(
            input=x,
            offset=offset,
            mask=mask,
            num_filters=filters2,
            filter_size=3,
            stride=1,
            padding=1,
            groups=1,
            deformable_groups=1,
            im2col_step=1,
            param_attr=ParamAttr(initializer=fluid.initializer.Normal(
                0.0, 0.01),
                                 name=pre_name + ".conv2.weight",
                                 trainable=trainable),
            bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                                name=pre_name + ".conv2.bias",
                                trainable=trainable))
    else:
        x = P.conv2d(input=x,
                     num_filters=filters2,
                     filter_size=3,
                     stride=1,
                     padding=1,
                     act=None,
                     param_attr=ParamAttr(initializer=fluid.initializer.Normal(
                         0.0, 0.01),
                                          name=pre_name + ".conv2.weight",
                                          trainable=trainable),
                     bias_attr=False)
    x = P.batch_norm(
        input=x,
        act=None,
        is_test=is_test,
        param_attr=ParamAttr(initializer=fluid.initializer.Constant(1.0),
                             regularizer=L2Decay(0.),
                             trainable=trainable,
                             name=pre_name + '.bn2.weight'),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            regularizer=L2Decay(0.),
                            trainable=trainable,
                            name=pre_name + '.bn2.bias'),
        moving_mean_name=pre_name + '.bn2.running_mean',
        moving_variance_name=pre_name + '.bn2.running_var')
    x = P.relu(x)

    x = P.conv2d(input=x,
                 num_filters=filters3,
                 filter_size=1,
                 stride=1,
                 padding=0,
                 act=None,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(
                     0.0, 0.01),
                                      name=pre_name + ".conv3.weight",
                                      trainable=trainable),
                 bias_attr=False)
    x = P.batch_norm(
        input=x,
        act=None,
        is_test=is_test,
        param_attr=ParamAttr(initializer=fluid.initializer.Constant(1.0),
                             regularizer=L2Decay(0.),
                             trainable=trainable,
                             name=pre_name + '.bn3.weight'),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            regularizer=L2Decay(0.),
                            trainable=trainable,
                            name=pre_name + '.bn3.bias'),
        moving_mean_name=pre_name + '.bn3.running_mean',
        moving_variance_name=pre_name + '.bn3.running_var')

    x = P.elementwise_add(x=x, y=input_tensor, act='relu')
    return x
Example #15
def Resnet101(inputs, is_test, trainable, use_dcn):
    x = P.conv2d(inputs,
                 64,
                 filter_size=7,
                 stride=2,
                 padding=3,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(
                     0.0, 0.01),
                                      name="backbone.conv1.weight",
                                      trainable=trainable),
                 bias_attr=False)
    x = P.batch_norm(
        input=x,
        act=None,
        is_test=is_test,
        param_attr=ParamAttr(initializer=fluid.initializer.Constant(1.0),
                             regularizer=L2Decay(0.),
                             trainable=trainable,
                             name='backbone.bn1.weight'),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            regularizer=L2Decay(0.),
                            trainable=trainable,
                            name='backbone.bn1.bias'),
        moving_mean_name='backbone.bn1.running_mean',
        moving_variance_name='backbone.bn1.running_var')
    x = P.relu(x)
    x = P.pool2d(x,
                 pool_size=3,
                 pool_type="max",
                 pool_stride=2,
                 pool_padding=1)

    # stage2
    x = conv_block(x, [64, 64, 256],
                   'backbone.layers.0.0',
                   is_test,
                   trainable,
                   stride=1)
    x = identity_block(x, [64, 64, 256], 'backbone.layers.0.1', is_test,
                       trainable)
    x = identity_block(x, [64, 64, 256], 'backbone.layers.0.2', is_test,
                       trainable)
    # stage3
    x = conv_block(x, [128, 128, 512],
                   'backbone.layers.1.0',
                   is_test,
                   trainable,
                   use_dcn=use_dcn)
    x = identity_block(x, [128, 128, 512],
                       'backbone.layers.1.1',
                       is_test,
                       trainable,
                       use_dcn=use_dcn)
    x = identity_block(x, [128, 128, 512],
                       'backbone.layers.1.2',
                       is_test,
                       trainable,
                       use_dcn=use_dcn)
    s8 = identity_block(x, [128, 128, 512],
                        'backbone.layers.1.3',
                        is_test,
                        trainable,
                        use_dcn=use_dcn)
    # stage4
    x = conv_block(s8, [256, 256, 1024],
                   'backbone.layers.2.0',
                   is_test,
                   trainable,
                   use_dcn=use_dcn)
    for i in range(1, 22):
        x = identity_block(x, [256, 256, 1024],
                           'backbone.layers.2.%d' % i,
                           is_test,
                           trainable,
                           use_dcn=use_dcn)
    s16 = identity_block(x, [256, 256, 1024],
                         'backbone.layers.2.22',
                         is_test,
                         trainable,
                         use_dcn=use_dcn)
    # stage5
    x = conv_block(s16, [512, 512, 2048],
                   'backbone.layers.3.0',
                   is_test,
                   trainable,
                   use_dcn=use_dcn)
    x = identity_block(x, [512, 512, 2048],
                       'backbone.layers.3.1',
                       is_test,
                       trainable,
                       use_dcn=use_dcn)
    s32 = identity_block(x, [512, 512, 2048],
                         'backbone.layers.3.2',
                         is_test,
                         trainable,
                         use_dcn=use_dcn)

    return s8, s16, s32
Example #16
    def forward(self, graph_wrapper, is_test=False):
        """
        Build the network.
        """
        node_features = self._mol_encoder(graph_wrapper, name=self.name)

        features_list = [node_features]
        for layer in range(self.layer_num):
            edge_features = self._bond_encoder(
                    graph_wrapper, 
                    name='%s_layer%s' % (self.name, layer))
            if self.gnn_type == "gcn":
                feat = gcn_layer(
                        graph_wrapper,
                        features_list[layer],
                        edge_features,
                        act="relu",
                        name="%s_layer%s_gcn" % (self.name, layer))
            elif self.gnn_type == "gat":
                feat = gat_layer(
                        graph_wrapper, 
                        features_list[layer],
                        edge_features,
                        self.embed_dim,
                        act="relu",
                        name="%s_layer%s_gat" % (self.name, layer))
            else:
                feat = gin_layer(
                        graph_wrapper,
                        features_list[layer],
                        edge_features,
                        name="%s_layer%s_gin" % (self.name, layer))

            if self.norm_type == 'batch_norm':
                feat = layers.batch_norm(
                        feat, 
                        param_attr=fluid.ParamAttr(
                            name="%s_layer%s_batch_norm_scale" % (self.name, layer),
                            initializer=fluid.initializer.Constant(1.0)),
                        bias_attr=fluid.ParamAttr(
                            name="%s_layer%s_batch_norm_bias" % (self.name, layer),
                            initializer=fluid.initializer.Constant(0.0)),
                        moving_mean_name="%s_layer%s_batch_norm_moving_avearage" % (self.name, layer),
                        moving_variance_name="%s_layer%s_batch_norm_moving_variance" % (self.name, layer),
                        is_test=is_test)
            elif self.norm_type == 'layer_norm':
                feat = layers.layer_norm(
                        feat, 
                        param_attr=fluid.ParamAttr(
                            name="%s_layer%s_layer_norm_scale" % (self.name, layer),
                            initializer=fluid.initializer.Constant(1.0)),
                        bias_attr=fluid.ParamAttr(
                            name="%s_layer%s_layer_norm_bias" % (self.name, layer),
                            initializer=fluid.initializer.Constant(0.0)))
            else:
                raise ValueError('%s not supported.' % self.norm_type)

            if self.graph_norm:
                feat = pgl.layers.graph_norm(graph_wrapper, feat)

            if layer < self.layer_num - 1:
                feat = layers.relu(feat)
            feat = layers.dropout(
                    feat,
                    self.dropout_rate,
                    dropout_implementation="upscale_in_train",
                    is_test=is_test)

            # residual
            if self.residual:
                feat = feat + features_list[layer]

            features_list.append(feat)

        # stack the per-layer features before reducing across the layer axis
        if self.JK == "sum":
            node_repr = layers.reduce_sum(layers.stack(features_list, axis=0), dim=0)
        elif self.JK == "mean":
            node_repr = layers.reduce_mean(layers.stack(features_list, axis=0), dim=0)
        elif self.JK == "last":
            node_repr = features_list[-1]
        else:
            node_repr = features_list[-1]
        return node_repr
Example #17
def up_sampling_2(x, num_filters, name, act='relu'):
    x = pixel_shuffle(x, 2)
    x = conv2d(x, num_filters, 3, padding=1, name=name + "_conv2d_1")
    x = batch_norm(x, act=act, name=name + "_bn")
    return x
Example #18
def down_sampling_2(x, num_filters, name, act='leaky_relu'):
    x = conv2d(x, num_filters, 3, stride=2, padding=1, name=name + "_conv2d")
    x = batch_norm(x, act=act, name=name + "_bn")
    x = dropout(x, 0.25, name=name + "_dropout")
    return x
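A hedged sketch chaining the two helpers into a tiny encoder/decoder; it assumes conv2d, batch_norm, dropout and pixel_shuffle are the same local wrappers used above, and the layer names and filter counts are illustrative.

def tiny_hourglass(x, base_filters=64):
    # two strided downsamples, then two pixel-shuffle upsamples back to the input size
    e1 = down_sampling_2(x, base_filters, name="enc1")         # H/2, W/2
    e2 = down_sampling_2(e1, base_filters * 2, name="enc2")    # H/4, W/4
    d1 = up_sampling_2(e2, base_filters, name="dec1")          # H/2, W/2
    d2 = up_sampling_2(d1, base_filters // 2, name="dec2")     # H,   W
    return d2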