Example #1
 def forward(self, input):
     if self.inplace:
         input.set_value(layers.leaky_relu(input, self.alpha))
         return input
     else:
         y = layers.leaky_relu(input, self.alpha)
         return y
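This forward only makes sense inside a small activation wrapper. A minimal sketch of the enclosing dygraph Layer, assuming `alpha` and `inplace` are attributes set in `__init__` (the class name and defaults here are assumptions, not from the original project):

    import paddle.fluid.dygraph as dg
    import paddle.fluid.layers as layers

    class LeakyReLU(dg.Layer):
        """Hypothetical wrapper around the forward shown in Example #1."""

        def __init__(self, alpha=0.02, inplace=False):
            super(LeakyReLU, self).__init__()
            self.alpha = alpha
            self.inplace = inplace

        def forward(self, input):
            if self.inplace:
                # overwrite the input tensor's buffer instead of allocating a new one
                input.set_value(layers.leaky_relu(input, self.alpha))
                return input
            return layers.leaky_relu(input, self.alpha)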
Example #2
    def forward(self, tenFirst, tenSecond, tenFeaturesFirst, tenFeaturesSecond, tenFlow):
        tenFeaturesFirst = self.moduleFeat(tenFeaturesFirst)
        tenFeaturesSecond = self.moduleFeat(tenFeaturesSecond)

        if tenFlow is not None:
            tenFlow = self.moduleUpflow(tenFlow)
            tenFeaturesSecond = backwarp(tenInput=tenFeaturesSecond,
                                         tenFlow=tenFlow * self.fltBackwarp)

        if self.moduleUpcorr is None:
            correlation = nn.correlation(tenFeaturesFirst, tenFeaturesSecond, 
                                         pad_size=3,
                                         kernel_size=1,
                                         max_displacement=3,
                                         stride1=1,
                                         stride2=1,)
            tenCorrelation = L.leaky_relu(correlation, alpha=0.1)
        else:
            correlation = nn.correlation(tenFeaturesFirst, tenFeaturesSecond, 
                                         pad_size=6,
                                         kernel_size=1,
                                         max_displacement=6,
                                         stride1=2,
                                         stride2=2,)
            tenCorrelation = L.leaky_relu(correlation, alpha=0.1)
            tenCorrelation = self.moduleUpcorr(tenCorrelation)
            
        return (tenFlow if tenFlow is not None else 0.0) + self.moduleMain(tenCorrelation)
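Example #2 depends on a `backwarp` helper that is not shown. Below is a minimal sketch of what such a helper typically does in PWC-Net-style code: sample the second feature map backwards along the flow through a normalized grid. It assumes static, fully specified NCHW shapes and is an illustration, not the original project's implementation:

    import numpy as np
    import paddle.fluid.layers as L

    def backwarp(tenInput, tenFlow):
        n, c, h, w = tenInput.shape  # assumes every dim is known at build time
        # base pixel grid in normalized [-1, 1] coordinates, shape (1, 2, h, w)
        gx = np.linspace(-1.0, 1.0, w, dtype='float32').reshape(1, 1, 1, w).repeat(h, axis=2)
        gy = np.linspace(-1.0, 1.0, h, dtype='float32').reshape(1, 1, h, 1).repeat(w, axis=3)
        grid = L.assign(np.concatenate([gx, gy], axis=1))
        grid = L.expand(grid, expand_times=[n, 1, 1, 1])
        # convert the flow from pixel units to normalized coordinates, then offset the grid
        flow = L.concat([tenFlow[:, 0:1] / ((w - 1.0) / 2.0),
                         tenFlow[:, 1:2] / ((h - 1.0) / 2.0)], axis=1)
        grid = L.transpose(grid + flow, perm=[0, 2, 3, 1])  # (n, h, w, 2)
        return L.grid_sampler(tenInput, grid)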
Example #3
 def forward(self, x):
     if self.inplace:
         x.set_value(leaky_relu(x, self.alpha))
         return x
     else:
         y = leaky_relu(x, self.alpha)
         return y
Example #4
    def forward(self, x):
        """Forward network"""
        x = self.linear(x)
        x = layers.leaky_relu(x, alpha=0.1)
        x = self.dropout(x)

        return x
Example #5
def conv2d_unit(x, filters, kernels, stride, padding, name, is_test,
                trainable):
    x = P.conv2d(input=x,
                 num_filters=filters,
                 filter_size=kernels,
                 stride=stride,
                 padding=padding,
                 act=None,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(
                     0.0, 0.01),
                                      name=name + ".conv.weights",
                                      trainable=trainable),
                 bias_attr=False)
    bn_name = name + ".bn"
    x = P.batch_norm(
        input=x,
        act=None,
        is_test=is_test,
        param_attr=ParamAttr(initializer=fluid.initializer.Constant(1.0),
                             regularizer=L2Decay(0.),
                             trainable=trainable,
                             name=bn_name + '.scale'),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            regularizer=L2Decay(0.),
                            trainable=trainable,
                            name=bn_name + '.offset'),
        moving_mean_name=bn_name + '.mean',
        moving_variance_name=bn_name + '.var')
    x = P.leaky_relu(x, alpha=0.1)
    return x
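A hedged usage sketch for `conv2d_unit`; the alias `P` above is assumed to be `paddle.fluid.layers`, and the input name and shape below are invented for illustration:

    import paddle.fluid as fluid

    image = fluid.layers.data(name='image', shape=[3, 416, 416], dtype='float32')
    # one Conv-BN-LeakyReLU block, YOLO style
    y = conv2d_unit(image, filters=32, kernels=3, stride=1, padding=1,
                    name='stage0', is_test=False, trainable=True)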
Example #6
 def test_leaky_relu(self):
     program = Program()
     with program_guard(program):
         input = layers.data(name="input", shape=[16], dtype="float32")
         out = layers.leaky_relu(input, alpha=0.1, name='leaky_relu')
         self.assertIsNotNone(out)
     print(str(program))
Example #7
 def forward(self, x):
     # print(type(x))
     # x = layers.fc(x,self.out_channels,num_flatten_dims=len(x.shape)-1,param_attr=self.weight_attr,bias_attr=self.bias_attr)
     x = fluid.layers.cast(x, dtype='float32')
     x = self.linear(x)
     x = layers.leaky_relu(x, alpha=0.2)
     return x
Example #8
def _DBL(input, num_filters, filter_size, padding=1, name=None):
    conv = pfl.conv2d(input=input,
                      num_filters=num_filters,
                      filter_size=filter_size,
                      padding=padding,
                      name=(name + '_conv2d') if name else None)
    bn = pfl.batch_norm(input=conv, name=(name + '_bn') if name else None)
    act = pfl.leaky_relu(bn, name=(name + '_act') if name else None)
    return act
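DBL (Darknet conv + BN + LeakyReLU) blocks are normally chained. A small sketch, with `pfl` assumed to be `paddle.fluid.layers` and the shapes invented for illustration:

    import paddle.fluid as fluid

    img = fluid.layers.data(name='img', shape=[3, 256, 256], dtype='float32')
    x = _DBL(img, num_filters=32, filter_size=3, name='dbl1')
    x = _DBL(x, num_filters=64, filter_size=3, name='dbl2')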
Example #9
def StaticLenet(data, num_classes=10, classifier_activation='softmax'):
    conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
    conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
    fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
    fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
    fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
    conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
    conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
    fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
    fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
    fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
    conv1 = fluid.layers.conv2d(data,
                                num_filters=6,
                                filter_size=3,
                                stride=1,
                                padding=1,
                                param_attr=conv2d_w1_attr,
                                bias_attr=conv2d_b1_attr)
    batch_norm1 = layers.batch_norm(conv1)
    relu1 = layers.relu(batch_norm1)
    pool1 = fluid.layers.pool2d(relu1,
                                pool_size=2,
                                pool_type='max',
                                pool_stride=2)
    conv2 = fluid.layers.conv2d(pool1,
                                num_filters=16,
                                filter_size=5,
                                stride=1,
                                padding=0,
                                param_attr=conv2d_w2_attr,
                                bias_attr=conv2d_b2_attr)
    batch_norm2 = layers.batch_norm(conv2)
    relu6_1 = layers.relu6(batch_norm2)
    pool2 = fluid.layers.pool2d(relu6_1,
                                pool_size=2,
                                pool_type='max',
                                pool_stride=2)

    fc1 = fluid.layers.fc(input=pool2,
                          size=120,
                          param_attr=fc_w1_attr,
                          bias_attr=fc_b1_attr)
    leaky_relu1 = layers.leaky_relu(fc1, alpha=0.01)
    fc2 = fluid.layers.fc(input=leaky_relu1,
                          size=84,
                          param_attr=fc_w2_attr,
                          bias_attr=fc_b2_attr)
    sigmoid1 = layers.sigmoid(fc2)
    fc3 = fluid.layers.fc(input=sigmoid1,
                          size=num_classes,
                          param_attr=fc_w3_attr,
                          bias_attr=fc_b3_attr)
    softmax1 = layers.softmax(fc3, use_cudnn=True)
    return softmax1
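A minimal sketch of wiring `StaticLenet` into a static-graph program; the tensor names, shapes, and place are assumptions:

    import paddle.fluid as fluid

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        images = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32')
        probs = StaticLenet(images, num_classes=10)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)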
Example #10
    def forward(self, x):
        """Compute the upsampled condition.

        Args:
            x (Variable): shape(B, F, T), dtype float32, the condition (a mel
                spectrogram here; F is the number of frequency bands). In the
                internal Conv2DTransposes, the frequency dimension is treated
                as the `height` dimension rather than as `in_channels`.

        Returns:
            Variable: shape(B, F, T * upscale_factor), dtype float32, the upsampled condition.
        """
        x = F.unsqueeze(x, axes=[1])
        for sublayer in self.upsample_convs:
            x = F.leaky_relu(sublayer(x), alpha=.4)
        x = F.squeeze(x, [1])
        return x
Example #11
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.005
        alpha = 0.2
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True

        y = layers.leaky_relu(x, alpha=alpha)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        # keep samples away from 0, where leaky_relu's gradient jumps and
        # finite differences are unreliable
        x_arr[np.abs(x_arr) < 0.005] = 0.02

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)
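In Paddle's gradient-checker tests, a `func` like this is usually driven once per available place. A sketch of that customary pattern (the test method name is an assumption):

    import paddle.fluid as fluid

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            self.func(place)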
Example #12
    def forward(self, input, adj):
        """Forward network"""
        h = layers.fc(input, size=self.out_features, num_flatten_dims=2)

        _, N, _ = h.shape
        middle_result1 = layers.expand(layers.matmul(h, self.a1),
                                       expand_times=(1, 1, N))
        middle_result2 = layers.transpose(layers.expand(
            layers.matmul(h, self.a2), expand_times=(1, 1, N)),
                                          perm=[0, 2, 1])
        e = layers.leaky_relu(middle_result1 + middle_result2, self.alpha)
        adj = layers.cast(adj, dtype='int32')
        attention = nn.mask_fill(e, adj == 0, -1e9)
        attention = layers.softmax(attention, axis=2)
        attention = layers.dropout(attention, self.dropout)
        h_prime = layers.matmul(attention, h)
        if self.concat:
            return layers.elu(h_prime)
        else:
            return h_prime
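The `expand`/`transpose` pair above is a broadcasting trick: it materializes e[i, j] = (h @ a1)[i] + (h @ a2)[j] for every node pair before the leaky_relu. A small NumPy sketch of the same idea, for illustration only:

    import numpy as np

    N = 4
    s1 = np.random.rand(N, 1)   # per-node scores, plays the role of h @ a1
    s2 = np.random.rand(N, 1)   # per-node scores, plays the role of h @ a2
    e = s1 + s2.T               # N x N matrix of all pairwise sums
    assert np.isclose(e[1, 2], s1[1, 0] + s2[2, 0])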
Example #13
def gaan(gw, feature, hidden_size_a, hidden_size_v, hidden_size_m,
         hidden_size_o, heads, name):
    """Implementation of GaAN"""
    def send_func(src_feat, dst_feat, edge_feat):
        # compute the attention score on every edge
        # E * (M * D1); each dst node queries the src nodes of all its incident edges
        feat_query, feat_key = dst_feat['feat_query'], src_feat['feat_key']
        # E * M * D1
        old = feat_query
        feat_query = L.reshape(feat_query, [-1, heads, hidden_size_a])
        feat_key = L.reshape(feat_key, [-1, heads, hidden_size_a])
        # E * M
        alpha = L.reduce_sum(feat_key * feat_query, dim=-1)

        return {
            'dst_node_feat': dst_feat['node_feat'],
            'src_node_feat': src_feat['node_feat'],
            'feat_value': src_feat['feat_value'],
            'alpha': alpha,
            'feat_gate': src_feat['feat_gate']
        }

    def recv_func(message):
        # feature of the dst node of each edge
        dst_feat = message['dst_node_feat']
        # feature of the src node of each edge
        src_feat = message['src_node_feat']
        # each center node's own feature
        x = L.sequence_pool(dst_feat, 'average')
        # average feature of each center node's neighbors
        z = L.sequence_pool(src_feat, 'average')

        # compute the gate
        feat_gate = message['feat_gate']
        g_max = L.sequence_pool(feat_gate, 'max')
        g = L.concat([x, g_max, z], axis=1)
        g = L.fc(g, heads, bias_attr=False, act="sigmoid")

        # softmax
        alpha = message['alpha']
        alpha = paddle_helper.sequence_softmax(alpha)  # E * M

        feat_value = message['feat_value']  # E * (M * D2)
        old = feat_value
        feat_value = L.reshape(feat_value,
                               [-1, heads, hidden_size_v])  # E * M * D2
        feat_value = L.elementwise_mul(feat_value, alpha, axis=0)
        feat_value = L.reshape(feat_value,
                               [-1, heads * hidden_size_v])  # E * (M * D2)
        feat_value = L.lod_reset(feat_value, old)

        feat_value = L.sequence_pool(feat_value, 'sum')  # N * (M * D2)

        feat_value = L.reshape(feat_value,
                               [-1, heads, hidden_size_v])  # N * M * D2

        output = L.elementwise_mul(feat_value, g, axis=0)
        output = L.reshape(output, [-1, heads * hidden_size_v])  # N * (M * D2)

        output = L.concat([x, output], axis=1)

        return output

    # feature: N * D

    # compute what each node needs to send out:
    # the projected feature vectors
    # N * (D1 * M)
    feat_key = L.fc(feature,
                    hidden_size_a * heads,
                    bias_attr=False,
                    param_attr=fluid.ParamAttr(name=name + '_project_key'))
    # N * (D2 * M)
    feat_value = L.fc(feature,
                      hidden_size_v * heads,
                      bias_attr=False,
                      param_attr=fluid.ParamAttr(name=name + '_project_value'))
    # N * (D1 * M)
    feat_query = L.fc(feature,
                      hidden_size_a * heads,
                      bias_attr=False,
                      param_attr=fluid.ParamAttr(name=name + '_project_query'))
    # N * Dm
    feat_gate = L.fc(feature,
                     hidden_size_m,
                     bias_attr=False,
                     param_attr=fluid.ParamAttr(name=name + '_project_gate'))

    # send phase

    message = gw.send(
        send_func,
        nfeat_list=[('node_feat', feature), ('feat_key', feat_key),
                    ('feat_value', feat_value), ('feat_query', feat_query),
                    ('feat_gate', feat_gate)],
        efeat_list=None,
    )

    # aggregate neighbor features
    output = gw.recv(message, recv_func)
    output = L.fc(output,
                  hidden_size_o,
                  bias_attr=False,
                  param_attr=fluid.ParamAttr(name=name + '_project_output'))
    output = L.leaky_relu(output, alpha=0.1)
    output = L.dropout(output, dropout_prob=0.1)

    return output
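A hedged call-site sketch for `gaan`, assuming a PGL graph wrapper `gw` whose node features live under the key 'feat'; the hidden sizes and head count are illustrative:

    out = gaan(gw, gw.node_feat['feat'],
               hidden_size_a=16, hidden_size_v=16, hidden_size_m=16,
               hidden_size_o=64, heads=4, name='gaan_layer')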
Example #14
    def forward(self, input):
        if self.structure == 'fixed':
            x = layers.leaky_relu(self.fromrgb(input), 0.2)
            # 1. 1024 x 1024 x nf(9)(16) -> 512 x 512
            res = self.resolution_log2
            x = layers.leaky_relu(self.conv1(x), 0.2)
            x = layers.leaky_relu(self.down1(self.blur2d(x)), 0.2)

            # 2. 512 x 512 -> 256 x 256
            res -= 1
            x = layers.leaky_relu(self.conv2(x), 0.2)
            x = layers.leaky_relu(self.down1(self.blur2d(x)), 0.2)

            # 3. 256 x 256 -> 128 x 128
            res -= 1
            x = layers.leaky_relu(self.conv3(x), 0.2)
            x = layers.leaky_relu(self.down1(self.blur2d(x)), 0.2)

            # 4. 128 x 128 -> 64 x 64
            res -= 1
            x = layers.leaky_relu(self.conv4(x), 0.2)
            x = layers.leaky_relu(self.down1(self.blur2d(x)), 0.2)

            # 5. 64 x 64 -> 32 x 32
            res -= 1
            x = layers.leaky_relu(self.conv5(x), 0.2)
            x = layers.leaky_relu(self.down21(self.blur2d(x)), 0.2)

            # 6. 32 x 32 -> 16 x 16
            res -= 1
            x = layers.leaky_relu(self.conv6(x), 0.2)
            x = layers.leaky_relu(self.down22(self.blur2d(x)), 0.2)

            # 7. 16 x 16 -> 8 x 8
            res -= 1
            x = layers.leaky_relu(self.conv7(x), 0.2)
            x = layers.leaky_relu(self.down23(self.blur2d(x)), 0.2)

            # 8. 8 x 8 -> 4 x 4
            res -= 1
            x = layers.leaky_relu(self.conv8(x), 0.2)
            x = layers.leaky_relu(self.down24(self.blur2d(x)), 0.2)

            # 9. 4 x 4 -> point
            x = layers.leaky_relu(self.conv_last(x), 0.2)
            # N x 8192(4 x 4 x nf(1)).
            x = layers.reshape(x, (x.shape[0], -1))
            x = layers.leaky_relu(self.dense0(x), 0.2)
            # N x 1
            x = layers.leaky_relu(self.dense1(x), 0.2)
            return x
Example #15
 def send_attention(src_feat, dst_feat, edge_feat):
     """tbd"""
     output = src_feat["left_a"] + dst_feat["right_a"]
     output = layers.leaky_relu(output, alpha=0.2)  # (num_edges, num_heads)
     return {"alpha": output, "h": src_feat["h"] + edge_feat["h"]}
Example #16
 def forward(self, x):
     return L.leaky_relu(x, alpha=self.negative_slope)
Example #17
 def send_attention(src_feat, dst_feat, edge_feat):
     output = src_feat["left_a"] + dst_feat["right_a"]
     if 'dist_a' in edge_feat:
         output += edge_feat["dist_a"]
     output = L.leaky_relu(output, alpha=0.2)  # (num_edges, num_heads)
     return {"alpha": output, "h": src_feat["h"]}
Example #18
 def forward(self, x):
     if self.inplace:
         x.set_value(layers.leaky_relu(x, alpha=self.alpha))
         return x
     else:
         return layers.leaky_relu(x, alpha=self.alpha)
Example #19
def gaan(gw, feature, hidden_size_a, hidden_size_v, hidden_size_m, hidden_size_o, heads, name):
    """Implementation of GaAN"""

    def send_func(src_feat, dst_feat, edge_feat):
        # compute attention
        # E * (M * D1)
        feat_query, feat_key = dst_feat['feat_query'], src_feat['feat_key']
        # E * M * D1
        old = feat_query
        feat_query = L.reshape(feat_query, [-1, heads, hidden_size_a])
        feat_key = L.reshape(feat_key, [-1, heads, hidden_size_a])
        # E * M
        alpha = L.reduce_sum(feat_key * feat_query, dim=-1)

        return {'dst_node_feat': dst_feat['node_feat'],
                'src_node_feat': src_feat['node_feat'],
                'feat_value': src_feat['feat_value'],
                'alpha': alpha,
                'feat_gate': src_feat['feat_gate']}

    def recv_func(message):
        # feature of src and dst node on each edge
        dst_feat = message['dst_node_feat']
        src_feat = message['src_node_feat']
        # feature of center node
        x = L.sequence_pool(dst_feat, 'average')
        # feature of neighbors of center node
        z = L.sequence_pool(src_feat, 'average')

        # compute gate
        feat_gate = message['feat_gate']
        g_max = L.sequence_pool(feat_gate, 'max')
        g = L.concat([x, g_max, z], axis=1)
        g = L.fc(g, heads, bias_attr=False, act="sigmoid")

        # softmax
        alpha = message['alpha']
        alpha = paddle_helper.sequence_softmax(alpha) # E * M

        feat_value = message['feat_value'] # E * (M * D2)
        old = feat_value
        feat_value = L.reshape(feat_value, [-1, heads, hidden_size_v]) # E * M * D2
        feat_value = L.elementwise_mul(feat_value, alpha, axis=0)
        feat_value = L.reshape(feat_value, [-1, heads*hidden_size_v]) # E * (M * D2)
        feat_value = L.lod_reset(feat_value, old)

        feat_value = L.sequence_pool(feat_value, 'sum') # N * (M * D2)

        feat_value = L.reshape(feat_value, [-1, heads, hidden_size_v]) # N * M * D2

        output = L.elementwise_mul(feat_value, g, axis=0)
        output = L.reshape(output, [-1, heads * hidden_size_v]) # N * (M * D2)

        output = L.concat([x, output], axis=1)

        return output

    # N * (D1 * M)
    feat_key = L.fc(feature, hidden_size_a * heads, bias_attr=False,
                     param_attr=fluid.ParamAttr(name=name + '_project_key'))
    # N * (D2 * M)
    feat_value = L.fc(feature, hidden_size_v * heads, bias_attr=False,
                     param_attr=fluid.ParamAttr(name=name + '_project_value'))
    # N * (D1 * M)
    feat_query = L.fc(feature, hidden_size_a * heads, bias_attr=False,
                     param_attr=fluid.ParamAttr(name=name + '_project_query'))
    # N * Dm
    feat_gate = L.fc(feature, hidden_size_m, bias_attr=False, 
                                param_attr=fluid.ParamAttr(name=name + '_project_gate'))

    # send
    message = gw.send(
        send_func,
        nfeat_list=[('node_feat', feature), ('feat_key', feat_key), ('feat_value', feat_value),
                    ('feat_query', feat_query), ('feat_gate', feat_gate)],
        efeat_list=None,
    )

    # recv
    output = gw.recv(message, recv_func)
    output = L.fc(output, hidden_size_o, bias_attr=False,
                            param_attr=fluid.ParamAttr(name=name + '_project_output'))
    output = L.leaky_relu(output, alpha=0.1)
    output = L.dropout(output, dropout_prob=0.1)

    return output
Example #20
    def forward(self, x, cls=None):
        # x is BxTxCxHxW; note this input layout differs from the 2p1d network's
        # spatio-temporal video data
        b, t, c, h, w = x.shape
        # need to view it as B*TxCxHxW for the 2D CNN
        # important to keep the batch and time axes next to
        # each other, so a simple view without transposing is possible
        # questionable: torch's dataloader mixes classes when packing a batch, and clips
        # of the same class are not kept adjacent either, which matters here because the
        # representation-level flow has to be computed
        x = reshape(x, shape=[b * t, c, h, w])

        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)

        # insert the FCF layers here

        # res = x  # F.avg_pool2d(x, (3, 1), 1, 0)  # x[:,:,1:-1].contiguous(); F refers to torch.nn.functional
        res = x
        x = self.flow_cmp(x)
        x = self.flow_layer.norm_img(x)

        # compute flow for 0,1,...,T-1
        #        and       1,2,...,T
        b_t, c, h, w = x.shape
        x = reshape(x, shape=[b, -1, c, h, w])  # unpack x back to B,T,C,H,W; later ops act on the T axis
        # pick one of the next two lines, depending on whether x = x + res is used
        x = pad(x, paddings=[0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
        # t -= 1  # the representation-flow op drops one frame from t
        u, v = self.flow_layer(reshape(x[:, :-1], shape=[-1, c, h, w]),
                               reshape(x[:, 1:], shape=[-1, c, h, w]))

        x = concat([u, v], axis=1)

        x = self.flow_conv(x)

        # Flow-of-flow
        x = self.flow_cmp2(x)
        x = self.flow_layer.norm_img(x)
        # compute flow for 0,1,...,T-1
        #        and       1,2,...,T
        b_t, c, h, w = x.shape
        x = reshape(x, shape=[b, -1, c, h, w])
        # pick one of the next two lines, depending on whether x = x + res is used
        x = pad(x, paddings=[0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
        # t -= 1  # the representation-flow op drops one frame from t
        u, v = self.flow_layer2(reshape(x[:, :-1], shape=[-1, c, h, w]),
                                reshape(x[:, 1:], shape=[-1, c, h, w]))
        x = concat([u, v], axis=1)

        x = self.flow_conv2(x)
        x = self.bnf(x)

        x = x + res
        x = leaky_relu(x)


        x = self.layer3(x)
        x = self.layer4(x)

        #print(x.size())
        x = self.avgpool(x)

        x = reshape(x, shape=[x.shape[0], -1])
        x = self.dropout(x)

        # currently making dense, per-frame predictions
        x = self.fc(x)

        # so view as BxTxClass
        x = reshape(x, shape=[b, t, -1])
        # mean-pool over time
        x = reduce_mean(x, dim=1)  # pool over the temporal axis

        # return BxClass prediction
        if cls is not None:
            acc = float(accuracy(input=x, label=cls))
            return x, acc
        else:
            return x
Example #21
def leaky_relu(x):
    return layers.leaky_relu(x, alpha=0.1, name=None)
Example #22
 def forward(self, x):
     return L.leaky_relu(x, alpha=self.alpha)