Code example #1
File: capnet.py  Project: Godricly/Capsule_RCNN
def model_forward(x, net, down_samples, class_preds, box_preds, cap_transforms,
                  sizes, ratios):
    # extract feature with the body network
    x = net(x)

    # for each scale, add anchors, box and class predictions,
    # then compute the input to next scale
    default_anchors = []
    predicted_boxes = []
    predicted_classes = []

    for i in range(5):
        default_anchors.append(
            MultiBoxPrior(x, sizes=sizes[i], ratios=ratios[i]))
        prime_out = cap_transforms[i](x)
        class_capout = class_preds[i * 2](prime_out)
        class_pred = class_preds[i * 2 + 1](class_capout)
        class_pred = nd.flatten(nd.transpose(class_pred, (0, 2, 3, 1)))
        '''
        class_pred = class_preds[i](x)
        class_pred = nd.flatten(nd.transpose(class_pred, (0,2,3,1)))
        '''

        box_pred = nd.flatten(nd.transpose(box_preds[i](x), (0, 2, 3, 1)))
        print(class_pred.shape)
        predicted_boxes.append(box_pred)
        predicted_classes.append(class_pred)
        if i < 3:
            x = down_samples[i](x)
        elif i == 3:
            # simply use the pooling layer
            x = nd.Pooling(x, global_pool=True, pool_type='max', kernel=(4, 4))
    return default_anchors, predicted_classes, predicted_boxes
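The function above returns one list per scale. A minimal sketch of how such per-scale outputs are typically concatenated downstream (an assumption for illustration, not code from this project):

from mxnet import nd

# default_anchors: list of (1, num_anchors_i, 4); predicted_classes / predicted_boxes: lists of (batch, ...)
all_anchors = nd.concat(*default_anchors, dim=1)    # (1, total_anchors, 4)
all_classes = nd.concat(*predicted_classes, dim=1)  # (batch, total_anchors * num_classes)
all_boxes = nd.concat(*predicted_boxes, dim=1)      # (batch, total_anchors * 4)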
Code example #2
def net(X, verbose=False):
    X = X.reshape((batch_size, 1, 28, 28))
    out1 = nd.Convolution(data=X,
                          weight=w1,
                          bias=b1,
                          kernel=w1.shape[2:],
                          num_filter=w1.shape[0])
    out2 = nd.relu(out1)
    out3 = nd.Pooling(data=out2, pool_type="max", kernel=(2, 2), stride=(2, 2))
    out4 = nd.Convolution(data=out3,
                          weight=w2,
                          bias=b2,
                          kernel=w2.shape[2:],
                          num_filter=w2.shape[0])
    out5 = nd.relu(out4)
    out6 = nd.Pooling(data=out5, pool_type="max", kernel=(2, 2), stride=(2, 2))
    out7 = nd.dot(nd.flatten(out6), w3) + b3
    out8 = nd.relu(out7)
    out9 = nd.dot(out8, w4) + b4
    if verbose:
        print('1st conv block:', out3.shape)
        print('2nd conv activation:', out5.shape)
        print('2nd conv block:', out6.shape)
        print('1st dense (pre-activation):', out7.shape)
        print('1st dense:', out8.shape)
        print('2nd dense:', out9.shape)
        # print('output:', out9)
    return out9
Code example #3
def LetNet_Direct(X, verbose=False):
    # 初始化参数
    params = initialize_params()
    W1, b1, W2, b2, W3, b3, W4, b4 = params
    # 1. Copy the input batch to the GPU
    X = X.as_in_context(ctx)
    
    # 2. Convolutional layers
    # 2.1 First convolutional layer
    h1_conv = nd.Convolution(data=X, weight=W1, bias=b1,  kernel=W1.shape[2:],num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation, pool_type='max', kernel=(2,2),stride=(2,2))
    # 2.2 Second convolutional layer
    h2_conv = nd.Convolution(data=h1, weight=W2, bias=b2,kernel=W2.shape[2:], num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation, pool_type='max', kernel=(2,2), stride=(2,2))  # the pooling kernel size determines the output size
    # Flatten to a 2-D matrix as input to the dense layers
    h2 = nd.flatten(h2)    

    # 3. Fully connected layers
    # 3.1 First fully connected layer with nonlinear activation
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # 3.2 Second fully connected layer
    h4 = nd.dot(h3, W4) + b4
    
    # Optionally print the output shape of each block
    if verbose:
        print('1st conv block: ', h1.shape)
        print('2nd conv block: ', h2.shape)
        print('3rd dense: ', h3.shape)
        print('4th dense: ', h4.shape)
    return h4
Code example #4
    def net(self, X, debug=False):
        ########################
        #  Define the computation of the first convolutional layer
        ########################
        h1_conv = nd.Convolution(data=X, weight=self.W1, bias=self.b1, kernel=(2, 3, 3), num_filter=20)
        h1_activation = self.relu(h1_conv)
        h1 = nd.Pooling(data=h1_activation, pool_type="avg", kernel=(2, 2, 2), stride=(2, 2, 2))

        ########################
        #  Define the computation of the second convolutional layer
        ########################
        h2_conv = nd.Convolution(data=h1, weight=self.W2, bias=self.b2, kernel=(1, 5, 5), num_filter=50)
        h2_activation = self.relu(h2_conv)
        h2 = nd.Pooling(data=h2_activation, pool_type="avg", kernel=(1, 2, 2), stride=(1, 2, 2))

        ########################
        #  Flattening h2 so that we can feed it into a fully-connected layer
        ########################
        h2 = nd.flatten(h2)

        ########################
        #  Define the computation of the third (fully-connected) layer
        ########################
        h3_linear = nd.dot(h2, self.W3) + self.b3
        h3 = self.relu(h3_linear)

        ########################
        #  Define the computation of the output layer
        ########################
        yhat_linear = nd.dot(h3, self.W4) + self.b4
        #yhat = self.softmax(yhat_linear)

        return yhat_linear
Code example #5
def lenet(X, params):
    h1_conv = nd.Convolution(data=X,
                             weight=params[0],
                             bias=params[1],
                             kernel=(3, 3),
                             num_filter=20)
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='avg',
                    kernel=(2, 2),
                    stride=(2, 2))

    h2_conv = nd.Convolution(data=h1,
                             weight=params[2],
                             bias=params[3],
                             kernel=(5, 5),
                             num_filter=50)
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type='avg',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, params[4]) + params[5]
    h3 = nd.relu(h3_linear)
    y_hat = nd.dot(h3, params[6]) + params[7]
    return y_hat
Code example #6
File: cnn-scratch.py  Project: JiaLei123/ML_camp
def net(X, verbose=False):
    h1_conv = nd.Convolution(data=X,
                             weight=w1,
                             bias=b1,
                             kernel=w1.shape[2:],
                             num_filter=w1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))

    h2_conv = nd.Convolution(data=h1,
                             weight=w2,
                             bias=b2,
                             kernel=w2.shape[2:],
                             num_filter=w2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, w3) + b3
    h3 = nd.relu(h3_linear)

    h4_linear = nd.dot(h3, w4) + b4
    return h4_linear
Code example #7
def net(X, params, is_training=True, debug=False):

    W1, b1, gamma1, beta1, W2, b2, gamma2, beta2, W3, b3, gamma3, beta3, W4, b4 = params

    h1_conv = nd.Convolution(data=X, weight=W1, bias=b1, kernel=(3, 3), num_filter=20)
    h1_normed = batch_norm(h1_conv, gamma1, beta1, scope_name='bn1', is_training=is_training)
    h1_activation = relu(h1_normed)
    h1 = nd.Pooling(data=h1_activation, pool_type="avg", kernel=(2, 2), stride=(2, 2))
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))

    h2_conv = nd.Convolution(data=h1, weight=W2, bias=b2, kernel=(5, 5), num_filter=50)
    h2_normed = batch_norm(h2_conv, gamma2, beta2, scope_name='bn2', is_training=is_training)
    h2_activation = relu(h2_normed)
    h2 = nd.Pooling(data=h2_activation, pool_type="avg", kernel=(2, 2), stride=(2, 2))
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))

    h2 = nd.flatten(h2)
    if debug:
        print("Flat h2 shape: %s" % (np.array(h2.shape)))

    h3_linear = nd.dot(h2, W3) + b3
    h3_normed = batch_norm(h3_linear, gamma3, beta3, scope_name="bn3", is_training=is_training)
    h3 = relu(h3_normed)

    yhat_linear = nd.dot(h3, W4) + b4
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))
    return yhat_linear
Code example #8
 def forward(self, x):
     # Initial hidden state
     # An error occurred because the hidden state's context was on the CPU when computing the sentence representation; switch the context to the GPU
     x, att = self.sen_rep(x)
     x = nd.flatten(x)
     res = self.classifier(x)
     return res, att
Code example #9
def test_flatten():
    a = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y).reshape(
        (LARGE_X // 2, 2, SMALL_Y))
    b = nd.flatten(a)
    assert b[-1][-1] == (LARGE_X - 1)
    assert b[-1][0] == (LARGE_X - 2)
    assert b.shape == (LARGE_X // 2, SMALL_Y * 2)
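The test above relies on nd.flatten keeping the first axis and collapsing every remaining axis into one; a minimal standalone check:

from mxnet import nd

a = nd.arange(24).reshape((2, 3, 4))
b = nd.flatten(a)
print(b.shape)  # (2, 12): axis 0 is preserved, the trailing axes are merged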
Code example #10
def net(X, debug=False):
    ########################
    #  Define the computation of the first convolutional layer
    ########################
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=(3, 3),
                             num_filter=20)
    h1_activation = relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="avg",
                    kernel=(2, 2),
                    stride=(2, 2))
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))

    ########################
    #  Define the computation of the second convolutional layer
    ########################
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=(5, 5),
                             num_filter=50)
    h2_activation = relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="avg",
                    kernel=(2, 2),
                    stride=(2, 2))
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))

    ########################
    #  Flattening h2 so that we can feed it into a fully-connected layer
    ########################
    h2 = nd.flatten(h2)
    if debug:
        print("Flat h2 shape: %s" % (np.array(h2.shape)))

    ########################
    #  Define the computation of the third (fully-connected) layer
    ########################
    h3_linear = nd.dot(h2, W3) + b3
    h3 = relu(h3_linear)
    if debug:
        print("h3 shape: %s" % (np.array(h3.shape)))

    ########################
    #  Define the computation of the output layer
    ########################
    yhat_linear = nd.dot(h3, W4) + b4
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))

    return yhat_linear
Code example #11
File: A2C_ram_human.py  Project: yanxu1per/tiaojiaoRL
 def forward(self, x):
     x = nd.relu(self.bn1(self.conv1(x)))
     x = nd.relu(self.bn2(self.conv2(x)))
     x = nd.relu(self.bn3(self.conv3(x)))
     x = nd.flatten(x).expand_dims(0)
     #x, self.states = self.lstm(x, self.states)
     x = self.dense1(x)
     x = self.dense2(x)
     probs = self.action_pred(x)
     values = self.value_pred(x)
     return mx.ndarray.softmax(probs), values
Code example #12
 def forward(self, inputs):
     # Concatenate the outputs of the two embedding layers, each of shape (batch size, number of words, word vector dimension), along the word-vector dimension
     embeddings = nd.concat(
         self.embedding(inputs), self.constant_embedding(inputs), dim=2)
     # To match the input format required by Conv1D, move the word-vector dimension, i.e. the channel dimension of the one-dimensional convolution, forward
     embeddings = embeddings.transpose((0, 2, 1))
     # For each one-dimensional convolutional layer, max-over-time pooling yields an
     # NDArray of shape (batch size, channel size, 1). Use flatten to drop the last dimension, then concatenate along the channel dimension
     encoding = nd.concat(*[nd.flatten(
         self.pool(conv(embeddings))) for conv in self.convs], dim=1)
     # Apply dropout, then use a fully connected layer to obtain the output
     outputs = self.decoder(self.dropout(encoding))
     return outputs
Code example #13
def net(X, verbose=False):
    X = X.as_in_context(W1.context)

    # First convolutional layer
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))

    # Second convolutional layer
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    # First fully connected layer
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)

    # Second fully connected layer
    h4_linear = nd.dot(h3, W4) + b4

    if verbose:
        print('h1_conv:', h1_conv.shape)
        print('h1_activation:', h1_activation.shape)
        print('1st conv block:', h1.shape)
        print('-------------------\n')

        print('h2_conv:', h2_conv.shape)
        print('h2_activation:', h2_activation.shape)
        print('2nd conv block:', h2.shape)
        print('-------------------\n')

        print('h3_linear:', h3_linear.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Code example #14
 def forward(self, inputs):
     # Output: (batch_size, num_words, emb_size * 2), the two embeddings concatenated along the embedding dimension
     embeddings = nd.concat(self.embedding(inputs),
                            self.constant_embedding(inputs),
                            dim=2)
     # Output: (batch_size, emb_size * 2, num_words)
     embeddings = embeddings.transpose((0, 2, 1))
     # Each pooled output is (batch_size, output_channels, 1); flatten and concatenate along dim 1
     encoding = nd.concat(
         *[nd.flatten(self.pool(conv(embeddings))) for conv in self.convs],
         dim=1)
     # Apply dropout and then fully connected layer
     outputs = self.decoder(self.dropout(encoding))
     return outputs
Code example #15
File: d2lzh.py  Project: LongClawZcw/mxnet_models
 def forward(self, inputs):
     # Concatenate the outputs of the two embedding layers, each of shape (batch size, number of words, word vector dimension), along the word-vector dimension.
     embeddings = nd.concat(self.embedding(inputs),
                            self.constant_embedding(inputs),
                            dim=2)
     # To match the input format required by Conv1D, move the word-vector dimension (the channel dimension of the one-dimensional convolution) forward.
     embeddings = embeddings.transpose((0, 2, 1))
     # For each one-dimensional convolutional layer, max-over-time pooling yields an
     # NDArray of shape (batch size, channel size, 1). Use flatten to drop the last dimension, then concatenate along the channel dimension.
     encoding = nd.concat(
         *[nd.flatten(self.pool(conv(embeddings))) for conv in self.convs],
         dim=1)
     # Apply dropout, then use a fully connected layer to obtain the output.
     outputs = self.decoder(self.dropout(encoding))
     return outputs
Code example #16
 def forward(self, inputs):
     # Concatenate the outputs of the two embedding layers, each of shape (batch size, number of words, word vector dimension), along the word-vector dimension
     # NxTx2D
     embeddings = nd.concat(self.embedding(inputs), self.constant_embedding(inputs), dim=2)
     # To match the input format required by Conv1D, move the word-vector dimension (the channel dimension of the 1-D convolution) forward
     # Nx2DxT: T corresponds to W in an image, 2D corresponds to C, i.e. NxCxW
     embeddings = embeddings.transpose((0, 2, 1))
     # For each one-dimensional convolutional layer, max-over-time pooling yields an NDArray
     # of shape (batch size, channel size, 1). Use flatten to drop the last dimension, then concatenate along the channel dimension
     # Nx2DxT, Cxk -> NxCx(T-k+1) -> NxCx1 -> NxC -> [NxC, NxC, NxC] -> Nx3C
     encoding = nd.concat(*[nd.flatten(self.pool(conv(embeddings))) for conv in self.convs], dim=1)  # dim=1 refers to the concat axis
     # Apply dropout, then use a fully connected layer to obtain the output
     # Nx3C -> Nx2
     outputs = self.decoder(self.dropout(encoding))
     return outputs
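A minimal standalone sketch of the NxCxW -> NxC shape flow described in the comments above, using hypothetical sizes (not taken from the project):

from mxnet import nd
from mxnet.gluon import nn

conv = nn.Conv1D(channels=4, kernel_size=3)
pool = nn.GlobalMaxPool1D()
conv.initialize()
x = nd.random.uniform(shape=(2, 6, 10))             # N x C x T
y = nd.flatten(pool(conv(x)))                        # N x channels
print(conv(x).shape, pool(conv(x)).shape, y.shape)   # (2, 4, 8) (2, 4, 1) (2, 4)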
Code example #17
def net(X, is_training=False, verbose=False):
    X = X.as_in_context(W1.context)
    # X = X.flatten().reshape((X.shape[0], X.shape[3], X.shape[1], X.shape[2]))
    # First convolutional layer
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=W1.shape[0])
    # First batch normalization
    h1_bn = batch_norm(h1_conv, gamma1, beta1, is_training, moving_mean1,
                       moving_variance1)
    # First activation
    h1_activation = nd.relu(h1_bn)
    # First pooling layer
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))

    # Second convolutional layer
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=W2.shape[0])
    h2_bn = batch_norm(h2_conv, gamma2, beta2, is_training, moving_mean2,
                       moving_variance2)
    h2_activation = nd.relu(h2_bn)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))

    h2 = nd.flatten(h2)

    # First fully connected layer
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # Second fully connected layer
    h4_linear = nd.dot(h3, W4) + b4
    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Code example #18
    def forward(self, inputs):
        # Concatenate the outputs of the two embedding layers, each of shape (batch size, number of words, word vector dimension), along the word-vector dimension
        embeddings = nd.concat(self.embedding(inputs),
                               self.constant_embedding(inputs),
                               dim=2)

        # To match the input format required by Conv1D, move the word-vector dimension, i.e. the channel dimension of the one-dimensional convolution, forward
        embeddings = embeddings.transpose((0, 2, 1))

        # For each one-dimensional convolutional layer, max-over-time pooling yields an NDArray of shape (batch size, channel size, 1);
        # use flatten to drop the last dimension, then concatenate along the channel dimension
        encoding = nd.concat(
            *[nd.flatten(self.pool(conv(embeddings))) for conv in self.convs],
            dim=1)

        outputs = self.decoder(self.dropout(encoding))

        return outputs
Code example #19
    def net(self, X, debug=False):
        ########################
        #  Define the computation of the first convolutional layer
        ########################
        h1_conv = nd.Convolution(data=X, weight=self.W1, bias=self.b1, kernel=(2, 8, 8), num_filter=16)
        if debug: print("h1 shape: %s" % (np.array(h1_conv.shape)))
        h1_activation = self.relu(h1_conv)
        if debug: print("h1 shape: %s" % (np.array(h1_activation.shape)))
        #h1 = nd.Pooling(data=h1_activation, pool_type="avg", kernel=(2, 2, 2), stride=(2, 2, 2))
        h1 = h1_activation
        if debug: print("h1 shape: %s" % (np.array(h1.shape)))

        ########################
        #  Define the computation of the second convolutional layer
        ########################
        h2_conv = nd.Convolution(data=h1, weight=self.W2, bias=self.b2, kernel=(1, 4, 4), num_filter=32)
        if debug: print("h2 shape: %s" % (np.array(h2_conv.shape)))
        h2_activation = self.relu(h2_conv)
        if debug: print("h2 shape: %s" % (np.array(h2_activation.shape)))
        #h2 = nd.Pooling(data=h2_activation, pool_type="avg", kernel=(1, 2, 2), stride=(1, 2, 2))
        h2 = h2_activation
        if debug: print("h2 shape: %s" % (np.array(h2.shape)))

        ########################
        #  Flattening h2 so that we can feed it into a fully-connected layer
        ########################
        h2 = nd.flatten(h2)
        if debug: print("Flat h2 shape: %s" % (np.array(h2.shape)))

        ########################
        #  Define the computation of the third (fully-connected) layer
        ########################
        h3_linear = nd.dot(h2, self.W3) + self.b3
        h3 = self.relu(h3_linear)
        if debug: print("h3 shape: %s" % (np.array(h3.shape)))

        ########################
        #  Define the computation of the output layer
        ########################
        yhat_linear = nd.dot(h3, self.W4) + self.b4
        #yhat = self.softmax(yhat_linear)
        if debug: print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))

        return yhat_linear
Code example #20
def net(X, verbose=False):
    X = X.as_in_context(W1.context)
    # first convolution layer
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)

    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    # print(h1.shape)
    # second convolution layer
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    # print(h2.shape)

    h2 = nd.flatten(h2)

    # first layer fully connected
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # second layer fully connected
    h4_linear = nd.dot(h3, W4) + b4

    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Code example #21
 def forward(self, inputs):
     # Concatenate the output of two embedding layers with shape of
     # (batch size, number of words, word vector dimension) by word vector
     embeddings = nd.concat(
         self.embedding(inputs), self.constant_embedding(inputs), dim=2)
     # According to the input format required by Conv1D, the word vector
     # dimension, that is, the channel dimension of the one-dimensional
     # convolutional layer, is transformed into the previous dimension
     embeddings = embeddings.transpose((0, 2, 1))
     # For each one-dimensional convolutional layer, after max-over-time
     # pooling, an NDArray with the shape of (batch size, channel size, 1)
     # can be obtained. Use the flatten function to remove the last
     # dimension and then concatenate on the channel dimension
     encoding = nd.concat(*[nd.flatten(
         self.pool(conv(embeddings))) for conv in self.convs], dim=1)
     # After applying the dropout method, use a fully connected layer to
     # obtain the output
     outputs = self.decoder(self.dropout(encoding))
     return outputs
Code example #22
def Net(X, is_training=False, verbose=False):
    # First convolutional layer
    h1_conv = nd.Convolution(X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=c1)
    h1_bn = batch_norm(h1_conv, gamma1, beta1, is_training, moving_mean1,
                       moving_variance1)
    h1_activation = nd.Activation(h1_bn, act_type='relu')
    h1 = nd.Pooling(h1_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    # Second convolutional layer
    h2_conv = nd.Convolution(h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=c2)
    h2_bn = batch_norm(h2_conv, gamma2, beta2, is_training, moving_mean2,
                       moving_variance2)
    h2_activation = nd.relu(h2_bn)
    h2 = nd.Pooling(h2_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    h4_linear = nd.dot(h3, W4) + b4
    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense block:', h3.shape)
        print('2nd dense block:', h4_linear.shape)
        print('output:', h4_linear)

    return h4_linear
Code example #23
 def forward(self, inputs):
     """
     inputs: (batch_size, max_length)
     
     """
     embeddings = nd.concat(
         self.embedding(inputs), self.constant_embedding(inputs),
         dim=2)  # return (batch_size, max_length, embed_size * 2)
     embeddings = embeddings.transpose((0, 2, 1))  # Conv1D required
     """
     for each convs, the output of conv(embeddings) should be: (batch_size, num_channels of that convs, max_length - kerner_size + 1)
     for each convs, the output of maxpool(conv(embeddings)) should be: (batch_size, num_channels of that convs, 1)
     for each convs, the output of nd.flatten(maxpool(conv(embeddings))) should be (batch_size, num_channels of that convs)
     encoding: concate all the convs and return (batch_size, sum(all kernel_sizes))
     
     """
     encoding = nd.concat(*[
         nd.flatten(self.maxpool(conv(embeddings))) for conv in self.convs
     ],
                          dim=1)
     # Use dropout to prevent overfitting
     outputs = self.decoder(self.dropout(encoding))
     return outputs
Code example #24
 def forward(self, inputs):
     # Transpose inputs from (batch size, number of words) to (number of words, batch size).
     inputs = inputs.T
     # To match the input shape required by Conv1D, transpose embeddings_static and embeddings_non_static
     # from (number of words, batch size, word vector dimension) to (batch size, word vector dimension, number of words).
     embeddings_static = self.embedding_static(inputs).transpose((1, 2, 0))
     embeddings_non_static = self.embedding_non_static(inputs).transpose(
         (1, 2, 0))
     # Concatenate embeddings_static and embeddings_non_static along the word-vector dimension.
     embeddings = nd.concat(embeddings_static, embeddings_non_static, dim=1)
     # For the i-th convolution kernel, max-over-time pooling yields a matrix of shape
     # (batch size, nums_channels[i], 1). Use flatten to squeeze it to
     # (batch size, nums_channels[i]).
     encoding = [
         nd.flatten(
             self.get_pool(i)(self.get_bn(i)(self.get_conv(i)(embeddings))))
         for i in range(len(self.ngram_kernel_sizes))
     ]
     # Concatenate the outputs of all channels over the batch. encoding has shape
     # (batch size, sum of the elements of nums_channels).
     encoding = nd.concat(*encoding, dim=1)
     outputs = self.decoder(self.dropout(encoding))
     return outputs
Code example #25
File: cnn_basic.py  Project: chamlhy/python
def net(X, verbose=False):
    X = X.as_in_context(W1.context)  # move X to the same context as W1
    # First convolutional layer
    h1_cov = nd.Convolution(data=X, weight=W1, bias=b1, kernel=W1.shape[2:], num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_cov)
    h1 = nd.Pooling(data=h1_activation, pool_type="max", kernel=(2,2), stride=(2,2))
    # Second convolutional layer
    h2_cov = nd.Convolution(data=h1, weight=W2, bias=b2, kernel=W2.shape[2:], num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_cov)
    h2 = nd.Pooling(data=h2_activation, pool_type="max", kernel=(2,2), stride=(2,2))
    # Flatten before the fully connected layers
    h2 = nd.flatten(h2)
    # First fully connected layer
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # Second fully connected layer
    h4_linear = nd.dot(h3, W4) + b4
    # Optionally print the shape of each block
    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Code example #26
 def flatten_prediction(pred):
     return nd.flatten(nd.transpose(pred, axes=(0, 2, 3, 1)))
Code example #27
 def _flatten_prediction(self, pred):
     '''
     Helper function to flatten the predicted bounding boxes and categories
     '''
     return nd.flatten(nd.transpose(pred, axes=(0, 2, 3, 1)))
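For reference, a minimal sketch (with assumed shapes) of what this transpose-then-flatten does to a per-location prediction map:

from mxnet import nd

# e.g. class predictions with 4 anchors * 2 classes = 8 channels on a 3x3 feature map
pred = nd.random.uniform(shape=(2, 8, 3, 3))              # (batch, channels, height, width)
flat = nd.flatten(nd.transpose(pred, axes=(0, 2, 3, 1)))  # (batch, height * width * channels)
print(flat.shape)  # (2, 72)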
Code example #28
# This makes MXNet faster than NumPy: MXNet uses an optimized library (Intel MKL) for CPU operations.
# With a GPU it is roughly 30x faster than NumPy.

# convert an MXNet NDArray to a NumPy ndarray
# this operation is synchronous (further computation is blocked until the function has finished)
matrix = matrix.asnumpy()
print(matrix)

# convert numpy to mxnet ndarray
matrix = nd.array(matrix)
print(matrix)

# calculate the average intensity for every pixel
matrix = nd.random.randint(low=0, high=255, shape=(1, 3, 3))
print(matrix)
flat = nd.flatten(matrix)
flat = flat[0]
print(flat.shape)

a = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
print(a.shape)
for row in a:
    for col in row:
        print(col)

a = np.arange(8).reshape(2, 4)
print(a.shape)

# https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/training/normalization/index.html#Data-Normalization
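The linked tutorial covers data normalization; below is a minimal sketch of per-channel normalization with nd, using assumed mean/std values purely for illustration:

batch = nd.random.uniform(shape=(4, 3, 32, 32))               # NCHW batch (assumed shape)
mean = nd.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))  # assumed per-channel mean
std = nd.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))   # assumed per-channel std
normalized = nd.broadcast_div(nd.broadcast_sub(batch, mean), std)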

print('''