Example 1
    def forward(self, x, y):
        # concatenate input and conditioning tensor along the channel axis
        x = nd.concat(x, y, dim=1)
        x = nd.LeakyReLU(self.conv1(x))
        x = nd.LeakyReLU(self.bn2(self.conv2(x)))
        x = nd.LeakyReLU(self.bn3(self.conv3(x)))
        
#        y = nd.expand_dims(y, axis=2)
#        y = nd.expand_dims(y, axis=2)
#        y = nd.tile(y, [4,4])
#        
        x = self.conv4(x)
        
        return x
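
The layers used above (conv1 through conv4, bn2, bn3) live in the enclosing Gluon block. A minimal hypothetical sketch of such a block, with assumed channel counts and kernel sizes (not from the source):

from mxnet.gluon import nn

class ConditionedConvBlock(nn.Block):
    def __init__(self, **kwargs):
        super(ConditionedConvBlock, self).__init__(**kwargs)
        # all hyperparameters below are illustrative assumptions
        self.conv1 = nn.Conv2D(64, kernel_size=4, strides=2, padding=1)
        self.conv2 = nn.Conv2D(128, kernel_size=4, strides=2, padding=1)
        self.bn2 = nn.BatchNorm()
        self.conv3 = nn.Conv2D(256, kernel_size=4, strides=2, padding=1)
        self.bn3 = nn.BatchNorm()
        self.conv4 = nn.Conv2D(1, kernel_size=4)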
Example 2
def activation(X, act_type='relu'):
    if act_type == 'relu':
        return nd.maximum(X, nd.zeros_like(X))
    elif act_type == 'elu':
        # MXNet's LeakyReLU op also implements ELU via act_type
        return nd.LeakyReLU(X, act_type=act_type)
    else:
        raise ValueError('Unsupported act_type: %s' % act_type)
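
A quick usage sketch of the helper above; note that MXNet's ELU branch uses the op's default slope of 0.25 on the negative side:

from mxnet import nd

X = nd.array([[-1.0, 0.0, 2.0]])
print(activation(X))                  # ReLU: [[0. 0. 2.]]
print(activation(X, act_type='elu'))  # ELU: slope * (exp(x) - 1) for x <= 0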
Example 3
def leaky_relu(x):
    """slope=0.1 leaky ReLU

    :param x: NDArray
    :return: NDArray
    """
    return nd.LeakyReLU(x, slope=.1)
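
A quick check of the behavior (a sketch):

from mxnet import nd

x = nd.array([-2.0, 0.0, 3.0])
print(leaky_relu(x))  # [-0.2  0.  3.] -- negative inputs are scaled by 0.1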
Example 4
    def get(self, pred, label):
        # L2-normalize embeddings so dot products below are cosine similarities
        embedding = nd.L2Normalization(pred, mode='instance')
        self.acc = 0
        nc = self.nc
        ns = self.ns
        nq = self.nq
        margin = self.margin

        # split the batch into support (first nc*ns rows) and query embeddings
        s_embedding = embedding.slice_axis(axis=0, begin=0, end=nc * ns)
        q_embedding = embedding.slice_axis(axis=0, begin=nc * ns, end=None)
        s_cls_data = nd.reshape(s_embedding, (nc, ns, -1))
        q_cls_data = nd.reshape(q_embedding, (nc, nq, -1))

        # class centers: mean of each class's support embeddings, re-normalized
        s_cls_center = nd.mean(s_cls_data, axis=1)
        s_cls_center = nd.L2Normalization(s_cls_center, mode='instance')

        # cosine similarity between every query and every class center
        temp = q_embedding.expand_dims(axis=1) * s_cls_center.expand_dims(
            axis=0)
        data_center_dis = nd.sum(temp, axis=2)
        cur_label = nd.argmax(data_center_dis, axis=1)

        loss = 0
        # leaky hinge loss: penalize queries whose similarity to their own
        # class center falls below the margin
        for i in range(nc):
            temp = data_center_dis[i * nq:(i + 1) * nq, i]
            loss += nd.sum(
                nd.LeakyReLU(margin - temp, act_type='leaky', slope=0.1))

        # accuracy: fraction of queries assigned to their true class center
        for i in range(nc):
            self.acc += nd.sum(cur_label[nq * i:nq * (i + 1)] == i).asscalar()
        self.acc /= (nc * nq)

        # the block below recomputes the same hinge loss with broadcast ops;
        # note that loss1 is never used or returned
        s_cls_data = nd.reshape(s_embedding, (nc, ns, -1))
        q_cls_data = nd.reshape(q_embedding, (nc, nq, -1))

        s_cls_center = nd.mean(s_cls_data, axis=1)
        s_cls_center = nd.L2Normalization(s_cls_center, mode='instance')
        s_center_broadcast = s_cls_center.expand_dims(axis=1)
        s_center_dis = nd.sum(nd.broadcast_mul(q_cls_data, s_center_broadcast),
                              axis=2)
        temp = nd.LeakyReLU(margin - s_center_dis, act_type='leaky', slope=0.1)
        loss1 = nd.sum(temp)

        return (self.acc, cur_label, loss)
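
The LeakyReLU calls above act as a soft hinge on cosine similarity: pairs already above the margin contribute only a small slope-scaled negative value, while violating pairs contribute the full margin gap. A small numeric sketch (margin and similarity values are made up):

from mxnet import nd

margin = 0.5
sims = nd.array([0.9, 0.3])  # hypothetical query-to-own-center similarities
print(nd.LeakyReLU(margin - sims, act_type='leaky', slope=0.1))
# [-0.04  0.2]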
Example 5
def leaky_relu(x):
    """slope=0.1 leaky ReLu

    Parameters
    ----------
    x : NDArray
        Input

    Returns
    -------
    y : NDArray
        y = x > 0 ? x : 0.1 * x
    """
    return nd.LeakyReLU(x, slope=.1)
Example 6
File: gat.py Project: zswzifir/dgl
    def edge_attention(self, edges):
        # an edge UDF to compute unnormalized attention values from src and dst
        a = nd.LeakyReLU(edges.src['a1'] + edges.dst['a2'], slope=self.alpha)
        return {'a': a}
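
In DGL, an edge UDF like this is typically registered via apply_edges once the per-node projections 'a1' and 'a2' are stored in ndata. A hedged sketch of the usual wiring (the names g, proj_src, and proj_dst are assumptions, not from gat.py):

# g.ndata['a1'] = proj_src(h)          # per-node source-side attention term
# g.ndata['a2'] = proj_dst(h)          # per-node destination-side term
# g.apply_edges(self.edge_attention)   # unnormalized scores land in g.edata['a']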
Example 7
def leaky_relu(x):
    return nd.LeakyReLU(x, slope=.1)
Example 8
import numpy as np
from mxnet import nd, autograd


def net_PRL(X,
            params,
            debug=False,
            pool_type='max',
            pool_size=4,  # note: accepted but unused in the body below
            pool_stride=2):
    [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5, W6, b6, W7, b7, W8, b8, W9,
     b9] = params
    drop_prob = 0.5
    ########################
    #  Define the computation of the first convolutional layer
    ########################
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=(1, 64),
                             num_filter=8,
                             stride=(1, 1),
                             dilate=(1, 1))
    h1 = nd.LeakyReLU(h1_conv, act_type='elu')
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))

    ########################
    #  Define the computation of the second convolutional layer
    ########################
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=(1, 32),
                             num_filter=8,
                             stride=(1, 1),
                             dilate=(1, 1))
    h2_pooling = nd.Pooling(data=h2_conv,
                            pool_type=pool_type,
                            kernel=(1, 8),
                            stride=(1, pool_stride))
    h2 = nd.LeakyReLU(h2_pooling, act_type='elu')
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))

    ########################
    #  Define the computation of the third convolutional layer
    ########################
    h3_conv = nd.Convolution(data=h2,
                             weight=W3,
                             bias=b3,
                             kernel=(1, 32),
                             num_filter=16,
                             stride=(1, 1),
                             dilate=(1, 1))
    h3 = nd.LeakyReLU(h3_conv, act_type='elu')
    if debug:
        print("h3 shape: %s" % (np.array(h3.shape)))

    ########################
    #  Define the computation of the 4th convolutional layer
    ########################
    h4_conv = nd.Convolution(data=h3,
                             weight=W4,
                             bias=b4,
                             kernel=(1, 16),
                             num_filter=16,
                             stride=(1, 1),
                             dilate=(1, 1))
    h4_pooling = nd.Pooling(data=h4_conv,
                            pool_type=pool_type,
                            kernel=(1, 6),
                            stride=(1, pool_stride))
    h4 = nd.LeakyReLU(h4_pooling, act_type='elu')
    if debug:
        print("h4 shape: %s" % (np.array(h4.shape)))

    ########################
    #  Define the computation of the 5th convolutional layer
    ########################
    h5_conv = nd.Convolution(data=h4,
                             weight=W5,
                             bias=b5,
                             kernel=(1, 16),
                             num_filter=32,
                             stride=(1, 1),
                             dilate=(1, 1))
    h5 = nd.LeakyReLU(h5_conv, act_type='elu')
    if debug:
        print("h5 shape: %s" % (np.array(h5.shape)))

    ########################
    #  Define the computation of the 6th convolutional layer
    ########################
    h6_conv = nd.Convolution(data=h5,
                             weight=W6,
                             bias=b6,
                             kernel=(1, 16),
                             num_filter=32,
                             stride=(1, 1),
                             dilate=(1, 1))
    h6_pooling = nd.Pooling(data=h6_conv,
                            pool_type=pool_type,
                            kernel=(1, 4),
                            stride=(1, pool_stride))
    h6 = nd.LeakyReLU(h6_pooling, act_type='elu')
    if debug:
        print("h6 shape: %s" % (np.array(h6.shape)))

    ########################
    #  Flattening h6 so that we can feed it into a fully-connected layer
    ########################
    h7 = nd.flatten(h6)
    if debug:
        print("Flat h7 shape: %s" % (np.array(h7.shape)))

    ########################
    #  Define the computation of the 8th (fully-connected) layer
    ########################
    h8_linear = nd.dot(h7, W7) + b7
    h8 = nd.LeakyReLU(h8_linear, act_type='elu')
    if autograd.is_training():
        # apply dropout to the activation output (training only)
        h8 = dropout(h8, drop_prob)
    if debug:
        print("h8 shape: %s" % (np.array(h8.shape)))

    ########################
    #  Define the computation of the 9th (fully-connected) layer
    ########################
    h9_linear = nd.dot(h8, W8) + b8
    h9 = nd.LeakyReLU(h9_linear, act_type='elu')
    if autograd.is_training():
        # apply dropout to the activation output (training only)
        h9 = dropout(h9, drop_prob)
    if debug:
        print("h9 shape: %s" % (np.array(h9.shape)))

    ########################
    #  Define the computation of the output layer
    ########################
    yhat_linear = nd.dot(h9, W9) + b9
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))

    interlayer = [
        W1, b1, W2, b2, W3, b3, W4, b4, W5, b5, W6, b6, W7, b7, W8, b8, W9, b9
    ]

    return yhat_linear, interlayer
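
net_PRL calls a dropout helper that is defined elsewhere in the original script. A minimal sketch of the from-scratch inverted-dropout implementation it presumably relies on (an assumption, not the source's code):

from mxnet import nd

def dropout(X, drop_prob):
    assert 0 <= drop_prob <= 1
    keep_prob = 1 - drop_prob
    if keep_prob == 0:
        return nd.zeros_like(X)
    # zero activations with probability drop_prob, then rescale the survivors
    # so the expected output matches evaluation time (inverted dropout)
    mask = nd.random.uniform(0, 1, X.shape) > drop_prob
    return mask * X / keep_prob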