Example #1
def lenet(X, params):
    h1_conv = nd.Convolution(data=X,
                             weight=params[0],
                             bias=params[1],
                             kernel=(3, 3),
                             num_filter=20)
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='avg',
                    kernel=(2, 2),
                    stride=(2, 2))

    h2_conv = nd.Convolution(data=h1,
                             weight=params[2],
                             bias=params[3],
                             kernel=(5, 5),
                             num_filter=50)
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type='avg',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, params[4]) + params[5]
    h3 = nd.relu(h3_linear)
    y_hat = nd.dot(h3, params[6]) + params[7]
    return y_hat
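The snippet above expects `params` to hold eight NDArrays in a fixed order. Below is a minimal sketch (not part of the original example) of how they might be initialized and the function called, assuming 1-channel 28x28 inputs and 10 output classes; the shapes and scale are assumptions derived from the kernel and pooling sizes above.

from mxnet import nd

# Hypothetical parameter shapes for the lenet above: 28->26->13 after conv1/pool1,
# 13->9->4 after conv2/pool2, so the flattened feature size is 50*4*4 = 800.
scale = 0.01
params = [
    nd.random.normal(scale=scale, shape=(20, 1, 3, 3)),  # conv1 weight
    nd.zeros(20),                                         # conv1 bias
    nd.random.normal(scale=scale, shape=(50, 20, 5, 5)),  # conv2 weight
    nd.zeros(50),                                         # conv2 bias
    nd.random.normal(scale=scale, shape=(800, 128)),      # fc1 weight
    nd.zeros(128),                                        # fc1 bias
    nd.random.normal(scale=scale, shape=(128, 10)),       # fc2 weight
    nd.zeros(10),                                         # fc2 bias
]
for p in params:
    p.attach_grad()

y_hat = lenet(nd.random.normal(shape=(4, 1, 28, 28)), params)  # shape (4, 10)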
Example #2
def lenet(X, params):
    h1_conv = nd.Convolution(data=X,
                             weight=params[0],
                             bias=params[1],
                             kernel=(3, 3),
                             num_filter=20)
    h1_act = nd.relu(data=h1_conv)
    h1_pool = nd.Pooling(data=h1_act,
                         pool_type="avg",
                         kernel=(2, 2),
                         stride=(2, 2))
    h2_conv = nd.Convolution(data=h1_pool,
                             weight=params[2],
                             bias=params[3],
                             kernel=(5, 5),
                             num_filter=50)
    h2_act = nd.relu(data=h2_conv)
    h2_pool = nd.Pooling(data=h2_act,
                         pool_type="avg",
                         kernel=(2, 2),
                         stride=(2, 2))
    h2_flatten = nd.Flatten(data=h2_pool)
    h3_fc = nd.dot(h2_flatten, params[4]) + params[5]
    h3_act = nd.relu(data=h3_fc)
    h4_fc = nd.dot(h3_act, params[6]) + params[7]
    return h4_fc
Example #3
    def net(self, X, debug=False):
        ########################
        #  Define the computation of the first convolutional layer
        ########################
        h1_conv = nd.Convolution(data=X, weight=self.W1, bias=self.b1, kernel=(2, 3, 3), num_filter=20)
        h1_activation = self.relu(h1_conv)
        h1 = nd.Pooling(data=h1_activation, pool_type="avg", kernel=(2, 2, 2), stride=(2, 2, 2))

        ########################
        #  Define the computation of the second convolutional layer
        ########################
        h2_conv = nd.Convolution(data=h1, weight=self.W2, bias=self.b2, kernel=(1, 5, 5), num_filter=50)
        h2_activation = self.relu(h2_conv)
        h2 = nd.Pooling(data=h2_activation, pool_type="avg", kernel=(1, 2, 2), stride=(1, 2, 2))

        ########################
        #  Flattening h2 so that we can feed it into a fully-connected layer
        ########################
        h2 = nd.flatten(h2)

        ########################
        #  Define the computation of the third (fully-connected) layer
        ########################
        h3_linear = nd.dot(h2, self.W3) + self.b3
        h3 = self.relu(h3_linear)

        ########################
        #  Define the computation of the output layer
        ########################
        yhat_linear = nd.dot(h3, self.W4) + self.b4
        #yhat = self.softmax(yhat_linear)

        return yhat_linear
Example #4
def LetNet_Direct(X, verbose=False):
    # Initialize the parameters
    params = initialize_params()
    W1, b1, W2, b2, W3, b3, W4, b4 = params
    # 1. Copy the data to the GPU
    X = X.as_in_context(ctx)
    
    # 2. Convolutional layers
    # 2.1 First convolutional layer
    h1_conv = nd.Convolution(data=X, weight=W1, bias=b1,  kernel=W1.shape[2:],num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation, pool_type='max', kernel=(2,2),stride=(2,2))
    # 2.2 Second convolutional layer
    h2_conv = nd.Convolution(data=h1, weight=W2, bias=b2,kernel=W2.shape[2:], num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation, pool_type='max', kernel=(2,2), stride=(2,2))  # the pooling kernel and stride determine the output spatial size
    # Flatten to a 2-D matrix to feed the dense layers
    h2 = nd.flatten(h2)    

    # 3. Fully-connected layers
    # 3.1 First fully-connected layer with a ReLU non-linearity
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # 3.2 Second fully-connected layer
    h4 = nd.dot(h3, W4) + b4
    
    # Optionally print the output shape of each block
    if verbose:
        print('1st conv block: ', h1.shape)
        print('2nd conv block: ', h2.shape)
        print('3rd dense: ', h3.shape)
        print('4th dense: ', h4.shape)
    return h4
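`LetNet_Direct` relies on an `initialize_params()` helper and a global `ctx` that are not shown. Here is a plausible sketch under the assumption of 1x28x28 inputs, 20 and 50 filters with 5x5 and 3x3 kernels, and 10 classes; all of these shapes are assumptions, not recovered from the original.

import mxnet as mx
from mxnet import nd

ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()

def initialize_params(scale=0.01):
    # Hypothetical shapes: 28->24->12 after conv1/pool1 (5x5 kernel),
    # 12->10->5 after conv2/pool2 (3x3 kernel), so 50*5*5 = 1250 flat features.
    W1 = nd.random.normal(scale=scale, shape=(20, 1, 5, 5), ctx=ctx)
    b1 = nd.zeros(20, ctx=ctx)
    W2 = nd.random.normal(scale=scale, shape=(50, 20, 3, 3), ctx=ctx)
    b2 = nd.zeros(50, ctx=ctx)
    W3 = nd.random.normal(scale=scale, shape=(1250, 128), ctx=ctx)
    b3 = nd.zeros(128, ctx=ctx)
    W4 = nd.random.normal(scale=scale, shape=(128, 10), ctx=ctx)
    b4 = nd.zeros(10, ctx=ctx)
    params = [W1, b1, W2, b2, W3, b3, W4, b4]
    for p in params:
        p.attach_grad()
    return params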
Example #5
def net(x):
    # x = x.as_in_context(w1.context)
    # first conv layer
    # (bs, 1, 28, 28) ==> (bs, 20, 12, 12)
    h1_conv = nd.Convolution(data=x,
                             weight=w1,
                             bias=b1,
                             kernel=w1.shape[2:],
                             num_filter=w1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    # second conv layer
    # (bs, 20, 12, 12) ==> (bs, 50, 5, 5) ==> (bs, 50*5*5)
    h2_conv = nd.Convolution(data=h1,
                             weight=w2,
                             bias=b2,
                             kernel=w2.shape[2:],
                             num_filter=w2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = h2.flatten()
    # first fc layer
    # (bs, 1250) ==> (bs, 128)
    h3 = nd.relu(nd.dot(h2, w3) + b3)
    # second fc layer
    # (bs, 128) ==> (bs, 10)
    h4 = nd.dot(h3, w4) + b4
    return h4
Example #6
def net(X, verbose=False):
    X = X.reshape((batch_size, 1, 28, 28))
    out1 = nd.Convolution(data=X,
                          weight=w1,
                          bias=b1,
                          kernel=w1.shape[2:],
                          num_filter=w1.shape[0])
    out2 = nd.relu(out1)
    out3 = nd.Pooling(data=out2, pool_type="max", kernel=(2, 2), stride=(2, 2))
    out4 = nd.Convolution(data=out3,
                          weight=w2,
                          bias=b2,
                          kernel=w2.shape[2:],
                          num_filter=w2.shape[0])
    out5 = nd.relu(out4)
    out6 = nd.Pooling(data=out5, pool_type="max", kernel=(2, 2), stride=(2, 2))
    out7 = nd.dot(nd.flatten(out6), w3) + b3
    out8 = nd.relu(out7)
    out9 = nd.dot(out8, w4) + b4
    if verbose:
        print('1st conv block:', out3.shape)
        print('2nd conv activation:', out5.shape)
        print('2nd conv block:', out6.shape)
        print('1st dense (pre-activation):', out7.shape)
        print('1st dense:', out8.shape)
        print('2nd dense:', out9.shape)
        # print('output:', out9)
    return out9
Example #7
def net(X, verbose=False):
    h1_conv = nd.Convolution(data=X,
                             weight=w1,
                             bias=b1,
                             kernel=w1.shape[2:],
                             num_filter=w1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))

    h2_conv = nd.Convolution(data=h1,
                             weight=w2,
                             bias=b2,
                             kernel=w2.shape[2:],
                             num_filter=w2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, w3) + b3
    h3 = nd.relu(h3_linear)

    h4_linear = nd.dot(h3, w4) + b4
    return h4_linear
Example #8
def net(X, params, is_training=True, debug=False):

    W1, b1, gamma1, beta1, W2, b2, gamma2, beta2, W3, b3, gamma3, beta3, W4, b4 = params

    h1_conv = nd.Convolution(data=X, weight=W1, bias=b1, kernel=(3, 3), num_filter=20)
    h1_normed = batch_norm(h1_conv, gamma1, beta1, scope_name='bn1', is_training=is_training)
    h1_activation = relu(h1_normed)
    h1 = nd.Pooling(data=h1_activation, pool_type="avg", kernel=(2, 2), stride=(2, 2))
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))

    h2_conv = nd.Convolution(data=h1, weight=W2, bias=b2, kernel=(5, 5), num_filter=50)
    h2_normed = batch_norm(h2_conv, gamma2, beta2, scope_name='bn2', is_training=is_training)
    h2_activation = relu(h2_normed)
    h2 = nd.Pooling(data=h2_activation, pool_type="avg", kernel=(2, 2), stride=(2, 2))
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))

    h2 = nd.flatten(h2)
    if debug:
        print("Flat h2 shape: %s" % (np.array(h2.shape)))

    h3_linear = nd.dot(h2, W3) + b3
    h3_normed = batch_norm(h3_linear, gamma3, beta3, scope_name="bn3", is_training=is_training)
    h3 = relu(h3_normed)

    yhat_linear = nd.dot(h3, W4) + b4
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))
    return yhat_linear
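This version calls a hand-written `batch_norm(..., scope_name=..., is_training=...)` helper that is not shown. A minimal sketch of what such a helper might look like follows, keeping moving averages in a module-level dict keyed by `scope_name`; the epsilon, momentum, and bookkeeping details are assumptions, not taken from the original.

from mxnet import nd

_bn_moving = {}

def batch_norm(X, gamma, beta, scope_name, is_training=True,
               eps=1e-5, momentum=0.9):
    # Normalize dense outputs over the batch axis and conv outputs over the
    # batch and spatial axes, keeping one statistic per channel.
    if len(X.shape) == 2:
        axes, shape = (0,), (1, X.shape[1])
    else:
        axes, shape = (0, 2, 3), (1, X.shape[1], 1, 1)
    mean = X.mean(axis=axes)
    var = ((X - mean.reshape(shape)) ** 2).mean(axis=axes)
    if scope_name not in _bn_moving:
        _bn_moving[scope_name] = [nd.zeros_like(mean), nd.zeros_like(var)]
    moving_mean, moving_var = _bn_moving[scope_name]
    if is_training:
        moving_mean[:] = momentum * moving_mean + (1 - momentum) * mean
        moving_var[:] = momentum * moving_var + (1 - momentum) * var
    else:
        mean, var = moving_mean, moving_var
    X_hat = (X - mean.reshape(shape)) / nd.sqrt(var.reshape(shape) + eps)
    return gamma.reshape(shape) * X_hat + beta.reshape(shape)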
Example #9
def net(X, debug=False):
    ########################
    #  Define the computation of the first convolutional layer
    ########################
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=(3, 3),
                             num_filter=20)
    h1_activation = relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="avg",
                    kernel=(2, 2),
                    stride=(2, 2))
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))

    ########################
    #  Define the computation of the second convolutional layer
    ########################
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=(5, 5),
                             num_filter=50)
    h2_activation = relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="avg",
                    kernel=(2, 2),
                    stride=(2, 2))
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))

    ########################
    #  Flattening h2 so that we can feed it into a fully-connected layer
    ########################
    h2 = nd.flatten(h2)
    if debug:
        print("Flat h2 shape: %s" % (np.array(h2.shape)))

    ########################
    #  Define the computation of the third (fully-connected) layer
    ########################
    h3_linear = nd.dot(h2, W3) + b3
    h3 = relu(h3_linear)
    if debug:
        print("h3 shape: %s" % (np.array(h3.shape)))

    ########################
    #  Define the computation of the output layer
    ########################
    yhat_linear = nd.dot(h3, W4) + b4
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))

    return yhat_linear
Example #10
def net(X, verbose=False):
    X = X.as_in_context(W1.context)

    # First convolutional layer
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))

    # Second convolutional layer
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    # First fully-connected layer
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)

    # Second fully-connected layer
    h4_linear = nd.dot(h3, W4) + b4

    if verbose:
        print('h1_conv:', h1_conv.shape)
        print('h1_activation:', h1_activation.shape)
        print('1st conv block:', h1.shape)
        print('-------------------\n')

        print('h2_conv:', h2_conv.shape)
        print('h2_activation:', h2_activation.shape)
        print('2nd conv block:', h2.shape)
        print('-------------------\n')

        print('h3_linear:', h3_linear.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Example #11
def net(X, is_training=False, verbose=False):
    X = X.as_in_context(W1.context)
    # X = X.flatten().reshape((X.shape[0], X.shape[3], X.shape[1], X.shape[2]))
    # First convolutional layer
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=W1.shape[0])
    # First batch normalization
    h1_bn = batch_norm(h1_conv, gamma1, beta1, is_training, moving_mean1,
                       moving_variance1)
    # First activation
    h1_activation = nd.relu(h1_bn)
    # First pooling layer
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))

    # Second convolutional layer
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=W2.shape[0])
    h2_bn = batch_norm(h2_conv, gamma2, beta2, is_training, moving_mean2,
                       moving_variance2)
    h2_activation = nd.relu(h2_bn)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))

    h2 = nd.flatten(h2)

    # First fully-connected layer
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # Second fully-connected layer
    h4_linear = nd.dot(h3, W4) + b4
    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Example #12
def toy_ssd_forward(x, model, sizes, ratios, verbose=False):
    """Define a forward pass for the toy SSD."""
    # Unpack the body network, downsampling blocks, class predictors, and box predictors
    body, down_samples, class_predictors, box_predictors = model
    # Anchors, class predictions, and box predictions, collected per scale
    anchors, class_preds, box_preds = [], [], []

    # feature extraction
    x = body(x)

    # Predict at 5 scales: the body output, 3 halving (downsampling) blocks, and a global pooling layer
    for i in range(5):
        # anchor shape: batch_size x (anchors per image) x 4; concatenated along dim=1 at return
        anchors.append(MultiBoxPrior(x, sizes=sizes[i], ratios=ratios[i]))
        # class_predictor returns batch_size x channels x height x width, which is flattened to
        # batch_size x (height x width x channels) and concatenated at return
        class_preds.append(flatten_prediction(class_predictors[i](x)))
        # same for the box predictions
        box_preds.append(flatten_prediction(box_predictors[i](x)))

        if verbose:
            print('Predict scale', i, x.shape, 'with', anchors[-1].shape[1], 'anchors')

        if i < 3:
            x = down_samples[i](x)
        elif i == 3:
            x = nd.Pooling(x, global_pool=True, pool_type='max', kernel=(x.shape[2], x.shape[3]))

    # concat data
    return (concat_prediction(*anchors),          # anchors: batch_size x (all scales concatenated along dim=1) x 4
            concat_prediction(*class_preds),      # class_preds: batch_size x (h * w * c of each scale, concatenated along dim=1)
            concat_prediction(*box_preds))
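The SSD forward passes above and below lean on two small helpers, `flatten_prediction` and `concat_prediction` (also spelled `flatten_predict`/`concat_predict` or `concat_predictions` in other snippets), which are not shown. A likely sketch of what they do, following the usual MXNet SSD tutorial pattern; treat the exact names and signatures as assumptions.

from mxnet import nd

def flatten_prediction(pred):
    # (batch, channels, height, width) -> (batch, height * width * channels)
    return nd.flatten(nd.transpose(pred, axes=(0, 2, 3, 1)))

def concat_prediction(*preds):
    # Stitch the per-scale predictions together along dim 1.
    return nd.concat(*preds, dim=1)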
Example #13
def toy_ssd_forward(x, model, sizes, ratios, verbose=True):
    body, downsamplers, classpredictors, boxpredictors = model
    anchors, class_preds, box_preds = [], [], []  # null list

    # Note how the data flows from scale to scale
    x = body(x)
    for i in range(5):
        anchors.append(MultiBoxPrior(
            x, sizes=sizes[i],
            ratios=ratios[i]))  # generate 4 box per pixel of x, total 5 scales
        class_preds.append(flatten_predict(classpredictors[i](x)))
        box_preds.append(flatten_predict(boxpredictors[i](x)))
        if verbose:
            print('Predict scale', i, x.shape, 'with', anchors[-1].shape[1],
                  'anchors')

        if i < 3:
            x = downsamplers[i](x)
        elif i == 3:
            x = nd.Pooling(x,
                           global_pool=True,
                           pool_type='max',
                           kernel=(x.shape[2], x.shape[3]))  # Global kernel

    return (
        concat_predict(anchors),  # the channel dim is the 
        concat_predict(class_preds),
        concat_predict(box_preds))
Example #14
def toy_ssd_forward(x, model, sizes, ratios, verbose=False):
    body, downsamplers, class_predictors, box_predictors = model
    anchors, class_preds, box_preds = [], [], []

    # feature extraction
    x = body(x)
    for i in range(5):
        # predict
        anchors.append(MultiBoxPrior(x, sizes=sizes[i], ratios=ratios[i]))
        class_preds.append(flatten_prediction(class_predictors[i](x)))
        box_preds.append(flatten_prediction(box_predictors[i](x)))
        # if verbose:
        #     print('Predict scale', i, x.shape, 'with', anchors[-1].shape[1], 'anchors')
        # down sample
        if i < 3:
            x = downsamplers[i](x)
        elif i == 3:
            x = nd.Pooling(x,
                           global_pool=True,
                           pool_type='max',
                           kernel=(x.shape[2], x.shape[3]))

    return (concat_predictions(anchors), concat_predictions(class_preds),
            concat_predictions(box_preds))
Example #15
def model_forward(x, net, down_samples, class_preds, box_preds, cap_transforms,
                  sizes, ratios):
    # extract feature with the body network
    x = net(x)

    # for each scale, add anchors, box and class predictions,
    # then compute the input to next scale
    default_anchors = []
    predicted_boxes = []
    predicted_classes = []

    for i in range(5):
        default_anchors.append(
            MultiBoxPrior(x, sizes=sizes[i], ratios=ratios[i]))
        prime_out = cap_transforms[i](x)
        class_capout = class_preds[i * 2](prime_out)
        class_pred = class_preds[i * 2 + 1](class_capout)
        class_pred = nd.flatten(nd.transpose(class_pred, (0, 2, 3, 1)))
        '''
        class_pred = class_preds[i](x)
        class_pred = nd.flatten(nd.transpose(class_pred, (0,2,3,1)))
        '''

        box_pred = nd.flatten(nd.transpose(box_preds[i](x), (0, 2, 3, 1)))
        # print class_pred.shape, box_pred.shape
        print(class_pred.shape)
        # print class_pred.shape
        predicted_boxes.append(box_pred)
        predicted_classes.append(class_pred)
        if i < 3:
            x = down_samples[i](x)
        elif i == 3:
            # simply use the pooling layer
            x = nd.Pooling(x, global_pool=True, pool_type='max', kernel=(4, 4))
    return default_anchors, predicted_classes, predicted_boxes
Example #16
    def ssd_forward(self, x):
        '''
        Helper function for the forward pass of the SSD
        '''
        x = self.body(x)

        default_anchors = []
        predicted_boxes = []
        predicted_classes = []

        for i in range(self.num_anchors):
            default_anchors.append(
                MultiBoxPrior(x,
                              sizes=self.anchor_sizes[i],
                              ratios=self.anchor_ratios[i]))
            predicted_boxes.append(
                self._flatten_prediction(self.box_preds[i](x)))
            predicted_classes.append(
                self._flatten_prediction(self.class_preds[i](x)))
            if i < len(self.downsamples):
                x = self.downsamples[i](x)
            elif i == 3:
                x = nd.Pooling(x,
                               global_pool=True,
                               pool_type='max',
                               kernel=(4, 4))
        return default_anchors, predicted_classes, predicted_boxes
Example #17
def _nms(heat, kernel=3):
    pad = (kernel - 1) // 2

    hmax = nd.Pooling(data=heat,
                      kernel=(kernel, kernel),
                      stride=(1, 1),
                      pad=(pad, pad))  # default is max pooling
    keep = (hmax == heat).astype('float32')
    return heat * keep
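`_nms` keeps only the scores that survive a max-pooling comparison, i.e. the local maxima of a detection heatmap. A small, hypothetical usage example (the heatmap shape is an assumption):

from mxnet import nd

heat = nd.random.uniform(shape=(1, 2, 8, 8))   # (batch, classes, H, W) heatmap
peaks = _nms(heat, kernel=3)                   # non-maxima are zeroed out
print(int((peaks > 0).sum().asscalar()), 'local maxima kept')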
Example #18
def net(X, verbose=False):
    X = X.as_in_context(W1.context)
    # first convolution layer
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)

    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    # print(h1.shape)
    # second convolution layer
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    # print(h2.shape)

    h2 = nd.flatten(h2)

    # first layer fully connected
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # second layer fully connected
    h4_linear = nd.dot(h3, W4) + b4

    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Example #19
    def relevance_layerwise(self, out, *args, **kwargs):
        R = out
        a = self._in[0]
        pkwargs = self._kwargs.copy()
        pkwargs['pool_type'] = 'sum'
        # suppress mxnet warnings about sum-pooling not being supported with cudnn
        pkwargs['cudnn_off'] = True
        a.attach_grad()
        with autograd.record():
            z = nd.Pooling(a, **pkwargs)
        z.backward(out_grad=R / (z + (z == 0.)))
        return a * a.grad
Example #20
def Net(X, is_training=False, verbose=False):
    # First convolutional layer
    h1_conv = nd.Convolution(X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=c1)
    h1_bn = batch_norm(h1_conv, gamma1, beta1, is_training, moving_mean1,
                       moving_variance1)
    h1_activation = nd.Activation(h1_bn, act_type='relu')
    h1 = nd.Pooling(h1_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    # second Convolution
    h2_conv = nd.Convolution(h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=c2)
    h2_bn = batch_norm(h2_conv, gamma2, beta2, is_training, moving_mean2,
                       moving_variance2)
    h2_activation = nd.relu(h2_bn)
    h2 = nd.Pooling(h2_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    h4_linear = nd.dot(h3, W4) + b4
    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense block:', h3.shape)
        print('2nd dense block:', h4_linear.shape)
        print('output:', h4_linear)

    return h4_linear
Example #21
def net(X, Verbose=False):
    X = X.as_in_context(W1.context)  # keep X on the same device as W1
    # First convolutional layer
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    # Second convolutional layer
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = h2.flatten()
    # Third layer: fully connected
    h3 = nd.relu(nd.dot(h2, W3) + b3)
    # Fourth layer: fully connected
    h4 = nd.dot(h3, W4) + b4
    if Verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4.shape)
        print('output:', h4)
    return h4
Example #22
def net(x, is_training=False):
    x = x.as_in_context(w1.context)
    # print('x shape: ', x.shape)
    h1_conv = nd.Convolution(data=x,
                             weight=w1,
                             bias=b1,
                             kernel=w1.shape[2:],
                             num_filter=c1)
    h1_bn = batch_norm(h1_conv, gamma_1, beta_1, is_training, moving_mean_1,
                       moving_variance_1)
    h1_activation = nd.relu(h1_bn)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))

    h2_conv = nd.Convolution(data=h1,
                             weight=w2,
                             bias=b2,
                             kernel=w2.shape[2:],
                             num_filter=c2)
    h2_bn = batch_norm(h2_conv, gamma_2, beta_2, is_training, moving_mean_2,
                       moving_variance_2)
    h2_activation = nd.relu(h2_bn)
    h2 = nd.Pooling(h2_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = h2.flatten()

    h3_linear = nd.dot(h2, w3) + b3
    h3 = nd.relu(h3_linear)

    h4_linear = nd.dot(h3, w4) + b4

    return h4_linear
Example #23
    def toy_ssd_forward(self, x, body, downsamples, class_preds, box_preds, sizes, ratios):
        # extracted features
        x = body(x)

        default_anchors = []
        predicted_boxes = []
        predicted_classes = []
        for i in range(5):
            default_anchors.append(MultiBoxPrior(x, sizes=sizes[i], ratios=ratios[i]))
            predicted_boxes.append(self.flatten_prediction(box_preds[i](x)))
            predicted_classes.append(self.flatten_prediction(class_preds[i](x)))
            if i < 3:
                x = downsamples[i](x)
            elif i == 3:
                x = nd.Pooling(x, global_pool=True, pool_type='max', kernel=(4, 4))
        return default_anchors, predicted_classes, predicted_boxes
Example #24
def mobile_net_forward(x, body, downsamples, class_preds, box_preds, sizes,
                       ratios):
    x = body(x)
    default_anchors = []
    predicted_boxes = []
    predicted_classes = []
    for i in range(5):
        default_anchors.append(MultiBoxPrior(x, sizes[i], ratios=ratios[i]))
        predicted_boxes.append(flatten_prediction(box_preds[i](x)))
        predicted_classes.append(flatten_prediction(class_preds[i](x)))
        # print(predicted_classes[i].shape)
        if i < 3:
            x = downsamples[i](x)
        elif i == 3:
            x = nd.Pooling(x, global_pool=True, pool_type='max', kernel=(4, 4))
    return default_anchors, predicted_boxes, predicted_classes
Example #25
    def max_pool2d(data, pool_size, strides, padding, ceil_mode):
        data_nd = nd.array(data)
        pad = padding
        if len(padding) == 1:
            pad = (padding[0], padding[0])
        out = nd.Pooling(data_nd,
                         pool_size,
                         pool_type="max",
                         global_pool=False,
                         stride=strides,
                         pad=pad)

        data_npy = np.array(data)
        ashape = data_npy.shape
        n, ic, ih, iw = data_npy.shape
        sh, sw = strides
        kh, kw = pool_size
        bshape = [n, ic, ih, iw]
        if len(padding) == 1:
            pt = pl = pb = pr = padding[0]
        else:
            pt = pb = padding[0]
            pl = pr = padding[1]
        if ceil_mode:
            pb += sh - 1
            pr += sw - 1
        pad_np = np.full((n, ic, ih + pt + pb, iw + pl + pr),
                         -127).astype(INT32)
        # pad_np = np.zeros(shape=(n, ic, ih+pt+pb, iw+pl+pr)).astype(INT32)
        no_zero = (range(n), range(ic), (range(pt,
                                               ih + pt)), (range(pl, iw + pl)))
        pad_np[np.ix_(*no_zero)] = data_npy
        bshape[2] = int(math.floor(float(ashape[2] - kh + pt + pb) / sh) + 1)
        bshape[3] = int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1)
        if pt >= kh or (bshape[2] - 1) * sh - pt >= ashape[2]:
            raise ValueError("ceil_mode exceed out of input")
        if pl >= kw or (bshape[3] - 1) * sw - pl >= ashape[3]:
            raise ValueError("ceil mode exceed out of input")
        _, oc, oh, ow = bshape
        b_np = np.zeros(shape=(n, oc, oh, ow)).astype(INT32)
        for i in range(oh):
            for j in range(ow):
                b_np[:, :, i, j] = np.max(pad_np[:, :, i * sh:i * sh + kh,
                                                 j * sw:j * sw + kw],
                                          axis=(2, 3))
        return [b_np]
Example #26
    def forward(self, x):
        # MXNet nd.Convolution takes in a 4D Tensor of [nSamples x nChannels x Height x Width]
        x = mx.nd.array(x)

        layer = 0
        for w in self.conv_filters_:
            x = nd.Convolution(data=x,
                               weight=w,
                               kernel=w.shape[-2:],
                               num_filter=w.shape[0],
                               no_bias=True)
            if self.poolings_:
                window = (self.poolings_[layer], self.poolings_[layer])
                x = nd.Pooling(data=x,
                               pool_type="max",
                               kernel=window,
                               stride=window)
                layer += 1

        return x.asnumpy()
Example #27
def toy_ssd_forward(x, body, downsamples, class_preds, box_preds, sizes,
                    ratios):
    # extract feature with the body network
    x = body(x)
    # for each scale, add anchors, box and class predictions,
    # then compute the input to next scale
    default_anchors = []
    predicted_boxes = []
    predicted_classes = []
    for i in range(5):
        default_anchors.append(
            MultiBoxPrior(x, sizes=sizes[i], ratios=ratios[i]))
        predicted_boxes.append(flatten_prediction(box_preds[i](x)))
        predicted_classes.append(flatten_prediction(class_preds[i](x)))
        if i < 3:
            x = downsamples[i](x)
        elif i == 3:
            # simply use the pooling layer
            x = nd.Pooling(x, global_pool=True, pool_type='max', kernel=(4, 4))
    return default_anchors, predicted_classes, predicted_boxes
Example #28
def net(X, verbose=False):
    X = X.as_in_context(W1.context)  # move X to the same context as W1
    # First convolutional layer
    h1_cov = nd.Convolution(data=X, weight=W1, bias=b1, kernel=W1.shape[2:], num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_cov)
    h1 = nd.Pooling(data=h1_activation, pool_type="max", kernel=(2,2), stride=(2,2))
    # Second convolutional layer
    h2_cov = nd.Convolution(data=h1, weight=W2, bias=b2, kernel=W2.shape[2:], num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_cov)
    h2 = nd.Pooling(data=h2_activation, pool_type="max", kernel=(2,2), stride=(2,2))
    # Flatten before the fully-connected layers
    h2 = nd.flatten(h2)
    # First fully-connected layer
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)
    # Second fully-connected layer
    h4_linear = nd.dot(h3, W4) + b4
    # Optionally print the shape of each block
    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Example #29
w = nd.arange(16).reshape((2, 2, 2, 2))
print("w = ", w)
b = nd.array([1, 2])

data = nd.arange(18).reshape((1, 2, 3, 3))
print('data = ', data)
out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[0])
print('output:', out)
print('w.shape[2:]:', w.shape[2:])
print('w.shape[0]:', w.shape[0])

# Pooling

data = nd.arange(18).reshape((1, 2, 3, 3))

max_pool = nd.Pooling(data=data, pool_type='max', kernel=(2, 2))
avg_pool = nd.Pooling(data=data, pool_type='avg', kernel=(2, 2))
print('max pooling = ', max_pool)
print('avg pooling = ', avg_pool)


import sys
from utils import load_data_fashion_mnist

batch_size = 256
train_data, test_data = load_data_fashion_mnist(batch_size)

# define model
import mxnet as mx
ctx = mx.cpu()
_ = nd.zeros((1, ), ctx = ctx)
params = [W1, b1, W2, b2, W3, b3, W4, b4]

for param in params:
    param.attach_grad()

for data, _ in train_data:
    data = data.as_in_context(ctx)
    break
conv = nd.Convolution(data=data,
                      weight=W1,
                      bias=b1,
                      kernel=(3, 3),
                      num_filter=20)
print(conv.shape)

pool = nd.Pooling(data=conv, pool_type="max", kernel=(2, 2), stride=(2, 2))
print(pool.shape)


def relu(X):
    return nd.maximum(X, nd.zeros_like(X))


def softmax(y_linear):
    exp = nd.exp(y_linear - nd.max(y_linear))
    partition = nd.sum(exp, axis=0, exclude=True).reshape((-1, 1))
    return exp / partition


def softmax_cross_entropy(yhat_linear, y):
    return -nd.nansum(y * nd.log_softmax(yhat_linear), axis=0, exclude=True)
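With `relu`, `softmax_cross_entropy`, one of the `net` functions above, and parameters initialized (e.g., with an `initialize_params()` helper like the one sketched earlier), a training loop might look like the following hedged sketch; the learning rate, epoch count, and `SGD` helper are assumptions, not taken from the original.

from mxnet import autograd, nd

def SGD(params, lr):
    # Plain stochastic gradient descent on the raw NDArray parameters.
    for p in params:
        p[:] = p - lr * p.grad

epochs, learning_rate = 1, 0.01
for epoch in range(epochs):
    for data, label in train_data:
        data = data.as_in_context(ctx)
        label_one_hot = nd.one_hot(label.as_in_context(ctx), 10)
        with autograd.record():
            output = net(data)                                  # any of the net() variants above
            loss = softmax_cross_entropy(output, label_one_hot)
        loss.backward()
        SGD(params, learning_rate)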