Example #1
    def function_set(self):
        # First convolutional layer
        # Convolution
        h1_conv = nd.Convolution(
            data=self.__batch_X, weight=self.__W1, bias=self.__b1, kernel=self.__W1.shape[2:], num_filter=self.__W1.shape[0])
        # Activation
        h1_activation = nd.relu(h1_conv)
        # Pooling
        h1 = nd.Pooling(data=h1_activation, pool_type="max", kernel=(2, 2), stride=(2, 2))
        # Second convolutional layer
        h2_conv = nd.Convolution(
            data=h1, weight=self.__W2, bias=self.__b2, kernel=self.__W2.shape[2:], num_filter=self.__W2.shape[0])
        h2_activation = nd.relu(h2_conv)
        h2 = nd.Pooling(data=h2_activation, pool_type="max", kernel=(2, 2), stride=(2, 2))
        h2 = nd.flatten(h2)
        # First fully connected layer
        h3_linear = nd.dot(h2, self.__W3) + self.__b3
        h3 = nd.relu(h3_linear)
        # Second fully connected layer
        h4_linear = nd.dot(h3, self.__W4) + self.__b4

        # print("1st conv block:", h1.shape)
        # print("2nd conv block:", h2.shape)
        # print("1st dense:", h3.shape)
        # print("2nd dense:", h4_linear.shape)
        # print("output:", h4_linear)

        return h4_linear
def net(X, verbose=False):
    X = X.as_in_context(W1.context)
    # First convolutional layer
    h1_conv = nd.Convolution(data=X, weight=W1, bias=b1, kernel=W1.shape[2:], num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation, pool_type='max', kernel=(2, 2), stride=(2, 2))

    # Second convolutional layer
    h2_conv = nd.Convolution(data=h1, weight=W2, bias=b2, kernel=W2.shape[2:], num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(h2_activation, pool_type="max", kernel=(2, 2), stride=(2, 2))
    h2 = nd.flatten(h2)

    # First fully connected layer
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)

    # Second fully connected layer
    h4_linear = nd.dot(h3, W4) + b4
    if verbose:
        print('1st conv block', h1.shape)
        print('2nd conv block', h2.shape)
        print('1st dense', h3.shape)
        print('2nd dense', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
    def function_set(self):
        def batch_norm(X, gamma, beta, is_training, moving_mean, moving_variance, eps=1e-5, moving_momentum=0.9):
            assert len(X.shape) in (2, 4)
            # Fully connected: batch_size x feature
            if len(X.shape) == 2:
                # mean and variance of each input dimension over the batch
                mean = X.mean(axis=0)
                variance = ((X - mean) ** 2).mean(axis=0)
            # 2D convolution: batch_size x channel x height x width
            else:
                # compute per-channel mean and variance; keep the 4D shape so broadcasting works correctly
                mean = X.mean(axis=(0, 2, 3), keepdims=True)
                variance = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)
                # reshape so broadcasting works correctly
                moving_mean = moving_mean.reshape(mean.shape)
                moving_variance = moving_variance.reshape(mean.shape)
            # Normalize
            if is_training:
                X_hat = (X - mean) / nd.sqrt(variance + eps)
                # !!! update the global (moving) mean and variance
                # each batch keeps 0.9 of the previous moving value plus 0.1 of the current batch statistics
                moving_mean[:] = moving_momentum * moving_mean + (1.0 - moving_momentum) * mean
                moving_variance[:] = moving_momentum * moving_variance + (1.0 - moving_momentum) * variance
            else:
                # !!! use the global (moving) mean and variance at test time
                X_hat = (X - moving_mean) / nd.sqrt(moving_variance + eps)

            # Scale and shift
            return gamma.reshape(mean.shape) * X_hat + beta.reshape(mean.shape)

        # First convolutional layer
        h1_conv = nd.Convolution(
            data=self.__batch_X, weight=self.__W1, bias=self.__b1, kernel=(5, 5), num_filter=20)
        # First batch norm
        h1_bn = batch_norm(
            h1_conv, self.__gamma1, self.__beta1, self.__is_training, self.__moving_mean1, self.__moving_variance1)
        h1_activation = nd.relu(h1_bn)
        h1 = nd.Pooling(
            data=h1_activation, pool_type="max", kernel=(2, 2), stride=(2, 2))

        # Second convolutional layer
        h2_conv = nd.Convolution(
            data=h1, weight=self.__W2, bias=self.__b2, kernel=(3, 3), num_filter=50)
        # Second batch norm
        h2_bn = batch_norm(
            h2_conv, self.__gamma2, self.__beta2, self.__is_training, self.__moving_mean2, self.__moving_variance2)
        h2_activation = nd.relu(h2_bn)
        h2 = nd.Pooling(data=h2_activation, pool_type="max", kernel=(2, 2), stride=(2, 2))
        h2 = nd.flatten(h2)

        # First fully connected layer
        h3_linear = nd.dot(h2, self.__W3) + self.__b3
        h3 = nd.relu(h3_linear)

        # Second fully connected layer
        h4_linear = nd.dot(h3, self.__W4) + self.__b4

        return h4_linear
Example #4
def sc(X, W, attr):
    xshp, wshp = X.shape, W.shape
    C, OC, IC = xshp[1], wshp[0], wshp[1]
    assert C >= IC and C % IC == 0 and C // IC == eval(attr['num_group'])
    num_group = C // IC
    assert num_group == eval(attr['num_group']) and \
        OC >= num_group and OC % num_group == 0
    xs = sym_slice(X, 1, 1)
    ws = kernel_slice_2d(W)
    OPG = OC // num_group
    nattr = attr.copy()
    nattr['num_group'] = '1'
    nattr['num_filter'] = '1'
    nodes = []
    for o in range(OC):
        nnodes = []
        j = int(o/OPG)*IC
        for i in range(IC):
            xoi, woi = xs[i+j], ws[o][i]
            yoi = nd.Convolution(xoi, woi, **nattr)
            nnodes.append(yoi)
        if len(nnodes) > 1:
            zi = nd.add_n(*nnodes)
        else:
            zi = nnodes[0]
        nodes.append(zi)
    return nd.concat(*nodes, dim=1)
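The sc decomposition above relies on two helpers, sym_slice and kernel_slice_2d, that are not shown. A minimal sketch of what they are assumed to do (these implementations are illustrative assumptions, not the original code):

def sym_slice(X, axis, step):
    # assumed helper: split X along `axis` into consecutive chunks of `step` channels
    return [nd.slice_axis(X, axis=axis, begin=i, end=i + step)
            for i in range(0, X.shape[axis], step)]

def kernel_slice_2d(W):
    # assumed helper: ws[o][i] is the (1, 1, KH, KW) kernel linking input channel i to output channel o
    OC, IC = W.shape[:2]
    return [[W[o:o + 1, i:i + 1] for i in range(IC)] for o in range(OC)]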
Example #5
def test_conv2d():
    print("test conv2d")
    batch = np.random.randint(low=1, high=32)
    i_c = np.random.randint(low=1, high=32)
    i_h = np.random.randint(low=7, high=256)
    i_w = np.random.randint(low=7, high=256)
    xshape = (batch, i_c, i_h, i_w)
    print(xshape)
    x = np.random.randint(low=-127, high=127, size=xshape)
    o_c = np.random.randint(low=1, high=1024)
    f_h = 3
    f_w = 3
    wshape = (o_c, i_c, f_h, f_w)
    print(wshape)
    w = np.random.randint(low=-127, high=127, size=wshape)

    stride = (1, 1)
    padding = (0, 0)
    dilation = (1, 1)
    kernel_size = (f_h, f_w)
    o_h = (i_h + 2 * padding[0] - f_h) // stride[0] + 1
    o_w = (i_w + 2 * padding[1] - f_w) // stride[1] + 1

    oshape = (batch, o_c, o_h, o_w)
    params = {
        'stride': stride,
        'pad': padding,
        'dilate': dilation,
        'kernel': kernel_size,
        'num_filter': o_c
    }
    y = nd.Convolution(nd.array(x), nd.array(w), None, **params)
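    # sanity check (a sketch added here, not part of the original snippet):
    # with stride 1, no padding and no dilation, the convolution output
    # should match the oshape computed above
    assert y.shape == oshape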
 def _compute_block_mask(self, mask):
     weight_mat = nd.ones(48 * self.block_size * self.block_size)
     weight_mat = weight_mat.reshape(
         [48, 1, self.block_size, self.block_size])
     block_mask = nd.Convolution(data=mask,
                                 no_bias=True,
                                 weight=weight_mat,
                                 num_filter=48,
                                 kernel=(3, 3),
                                 pad=(int(np.ceil(self.block_size) / 2 + 1),
                                      int(np.ceil(self.block_size) / 2 +
                                          1)))
     # compute mask area
     delta = self.block_size // 2
     input_height = mask.shape[2] + delta * 2
     input_width = mask.shape[3] + delta * 2
     height_to_crop = block_mask.shape[2] - input_height
     width_to_crop = block_mask.shape[3] - input_width
     #print height_to_crop
     #print width_to_crop
     if height_to_crop != 0:
         block_mask = block_mask[:, :, :-height_to_crop, :]
     if width_to_crop != 0:
         block_mask = block_mask[:, :, :, :-width_to_crop]
     block_mask = 1 - block_mask
     return block_mask
def fwd_conv2d_groupwise(*data, **attrs):
    assert len(data) == 2
    num_group = attrs['num_group']
    assert num_group == 1, \
        "currently only support num_group = 1, provided {}".format(num_group)
    X, W = data
    # reshape weight
    OC, IC = W.shape[:2]
    W = nd.transpose(W, axes=(1, 0, 2, 3))
    rshp = (
        OC * IC,
        1,
    ) + W.shape[2:]
    W = W.reshape(rshp)
    # convert to groupwise conv
    attrs['num_group'] = IC
    attrs['num_filter'] = IC * OC
    out = nd.Convolution(X, W, **attrs)
    # sum axis
    YH, YW = out.shape[-2:]
    N = X.shape[0]
    rshp = (N, IC, OC, YH, YW)
    out = out.reshape(rshp)
    out = nd.sum(out, axis=1)
    return out
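A small self-contained check of the group-wise rewrite above, under assumed shapes and assuming the usual `from mxnet import ndarray as nd` import (this usage sketch is not part of the original example):

# compare the group-wise form against an ordinary convolution (assumed shapes)
x = nd.random.uniform(shape=(2, 3, 8, 8))
w = nd.random.uniform(shape=(4, 3, 3, 3))
attrs = {'num_group': 1, 'kernel': (3, 3), 'num_filter': 4, 'no_bias': True}
ref = nd.Convolution(x, w, **attrs)
out = fwd_conv2d_groupwise(x, w, **attrs)
print(nd.max(nd.abs(ref - out)))  # expected to be close to zero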
Example #8
def net(X, verbose=False):
    X = X.as_in_context(W1.context)
    # First convolutional layer
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=W1.shape[2:],
                             num_filter=W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))

    # h1_conv.shape:  (256, 20, 24, 24)
    # h1.shape:  (256, 20, 12, 12)
    #print('h1_conv.shape: ',h1_conv.shape)
    #print('h1.shape: ',h1.shape)

    # Second convolutional layer
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=W2.shape[2:],
                             num_filter=W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    # First fully connected layer
    h3_linear = nd.dot(h2, W3) + b3
    h3 = nd.relu(h3_linear)

    # Second fully connected layer
    h4_linear = nd.dot(h3, W4) + b4
    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense block:', h3.shape)
        print('2nd dense block:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Example #9
    def forward(self, x):
        inp = x.shape[1]
        oup = self.width_opt[self.idx]

        x = nd.Convolution(x, weight=self.conv_weight.data()[:oup,:inp,:,:], kernel=(3, 3), stride=self.stride, pad=(1, 1), num_filter=oup, no_bias=True)
        x = nd.BatchNorm(x, self.gamma[self.idx].data(), self.beta[self.idx].data(), self.moving_mean[self.idx].data(), self.moving_var[self.idx].data())
        x = nd.Activation(x, act_type='relu')

        return x
Example #10
    def network(self, X=None, debug=False,):
                
        filters, kernels, stride, padding, dilate = self.conv_params['num_filter'], self.conv_params['kernel'], \
                                                    self.conv_params['stride'], self.conv_params['padding'], self.conv_params['dilate']
        type_pool, kernels_pool, stride_pool, padding_pool, dilate_pool =  self.pool_params['pool_type'], \
                                                                           self.pool_params['kernel'], self.pool_params['stride'], \
                                                                           self.pool_params['padding'], self.pool_params['dilate']
        act_type = self.act_params['act_type']
        hidden_dim = self.fc_params['hidden_dim']
        
        
        # CNN ##########################################################################################################
        convlayer_out = X
        interlayer = []
        for i, (nf, k, S, P, D, t_p, k_p, S_p, P_p, D_p, a) in enumerate(zip(filters, kernels, stride, padding, dilate, 
                                                                     type_pool, kernels_pool, stride_pool, padding_pool, dilate_pool,
                                                                     act_type)):
            W, b = self.params['W{:d}'.format(i+1,)], self.params['b{:d}'.format(i+1,)]
            convlayer_out = nd.Convolution(data = convlayer_out, weight=W, bias=b, kernel=k, num_filter=nf, stride=S, dilate=D)
            convlayer_out = activation(convlayer_out, act_type = a)
            convlayer_out = nd.Pooling(data=convlayer_out, pool_type=t_p, kernel=k_p, stride=S_p, pad=P_p)

            interlayer.append(convlayer_out)
            i_out = i
            if debug:
                print("layer{:d} shape: {}".format(i+1, convlayer_out.shape))
        
        # MLP ##########################################################################################################
        FClayer_out = nd.flatten(convlayer_out)
        interlayer.append(FClayer_out)
        if debug:
            print("After Flattened, Data shape: {}".format(FClayer_out.shape))

        for j, (hd, a) in enumerate(zip(hidden_dim, act_type[-len(hidden_dim):])):
            W, b = self.params['W{:d}'.format(j+i_out+2,)], self.params['b{:d}'.format(j+i_out+2,)]
            FClayer_out = nd.dot(FClayer_out, W) + b
            FClayer_out = activation(FClayer_out, act_type = a)
            
            if autograd.is_training():
                # apply dropout to the activation output
                FClayer_out = dropout(FClayer_out, self.drop_prob)
            if debug:
                print("layer{:d} shape: {}".format(j+i_out+2, FClayer_out.shape))
            interlayer.append(FClayer_out)            
            j_out = j
            
        # OUTPUT ##########################################################################################################
        W, b = self.params['W{:d}'.format(j_out+i_out+3,)], self.params['b{:d}'.format(j_out+i_out+3,)]            
        yhat = nd.dot(FClayer_out, W) + b

        if debug:
            print("Output shape: {}".format(yhat.shape))
            print('------------')
        interlayer.append(yhat)       

        return yhat, interlayer
def net(x, is_training=False, verbose=False):
    x = x.as_in_context(w1.context)
    h1_conv = nd.Convolution(data=x,
                             weight=w1,
                             bias=b1,
                             kernel=w1.shape[2:],
                             num_filter=c1)
    h1_bn = utils.batch_norm(h1_conv, gamma1, beta1, is_training, moving_mean1,
                             moving_variance1)
    h1_activation = nd.relu(h1_bn)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))

    h2_conv = nd.Convolution(data=h1,
                             weight=w2,
                             bias=b2,
                             kernel=w2.shape[2:],
                             num_filter=c2)
    h2_bn = utils.batch_norm(h2_conv, gamma2, beta2, is_training, moving_mean2,
                             moving_variance2)
    h2_activation = nd.relu(h2_bn)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type='max',
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)

    h3_linear = nd.dot(h2, w3) + b3
    h3 = nd.relu(h3_linear)

    h4_linear = nd.dot(h3, w4) + b4

    if verbose:
        print('h1 conv block: ', h1.shape)
        print('h2 conv block: ', h2.shape)
        print('h3 dense block: ', h3.shape)
        print('h4 dense block: ', h4_linear.shape)
        print('output: ', h4_linear)

    return h4_linear.as_in_context(ctx)
Example #12
def sc(X, W, attr, ichannel, step):
    xshp = X.shape
    xs = sym_slice(X, ichannel, step)
    ws = sym_slice(W, ichannel, step)
    nodes = []
    j = 0
    for i in range(0, xshp[ichannel], step):
        yi = nd.Convolution(xs[j], ws[j], **attr)
        nodes.append(yi)
        j += 1
    return nd.add_n(*nodes)
Example #13
 def match_templates(self, net_z, net_x):
     # B C H W
     Bz, Cz, Hz, Wz = net_z.shape
     Bx, Cx, Hx, Wx = net_x.shape
     net_final_ = nd.Convolution(data=net_x,
                                 weight=net_z,
                                 num_filter=1,
                                 kernel=[Hz, Wz],
                                 no_bias=True)
     net_final_ = self.bn_final(net_final_)
     net_final_ = nd.transpose(net_final_, axes=(1, 2, 3, 0))
     net_final = net_final_[0]
     return net_final
Example #14
    def network(X, drop_rate=0.0):  # formula: output_size = ((input - kernel + 2*padding) / stride) + 1
        #data size 
        # MNIST,FashionMNIST = (batch size , 1 , 28 ,  28)
        # CIFAR = (batch size , 3 , 32 ,  32)

        C_H1=nd.Activation(data= nd.Convolution(data=X , weight = W1 , bias = B1 , kernel=(3,3) , stride=(1,1)  , num_filter=60) , act_type="relu") # MNIST : result = ( batch size , 60 , 26 , 26) , CIFAR10 : result = ( batch size , 60 , 30 , 30)
        P_H1=nd.Pooling(data = C_H1 , pool_type = "max" , kernel=(2,2), stride = (2,2)) # MNIST : result = (batch size , 60 , 13 , 13) , CIFAR10 : result = (batch size , 60 , 15 , 15)
        C_H2=nd.Activation(data= nd.Convolution(data=P_H1 , weight = W2 , bias = B2 , kernel=(6,6) , stride=(1,1) , num_filter=30), act_type="relu") # MNIST :  result = ( batch size , 30 , 8 , 8), CIFAR10 :  result = ( batch size , 30 , 10 , 10)
        P_H2=nd.Pooling(data = C_H2 , pool_type = "max" , kernel=(2,2), stride = (2,2)) # MNIST : result = (batch size , 30 , 4 , 4) , CIFAR10 : result = (batch size , 30 , 5 , 5)
        P_H2 = nd.flatten(data=P_H2)

        '''FullyConnected parameter
        • data: (batch_size, input_dim)
        • weight: (num_hidden, input_dim)
        • bias: (num_hidden,)
        • out: (batch_size, num_hidden)
        '''
        F_H1 =nd.Activation(nd.FullyConnected(data=P_H2 , weight=W3 , bias=B3 , num_hidden=120),act_type="sigmoid")
        F_H1 =nd.Dropout(data=F_H1, p=drop_rate)
        F_H2 =nd.Activation(nd.FullyConnected(data=F_H1 , weight=W4 , bias=B4 , num_hidden=64),act_type="sigmoid")
        F_H2 =nd.Dropout(data=F_H2, p=drop_rate)
        softmax_Y = nd.softmax(nd.FullyConnected(data=F_H2 ,weight=W5 , bias=B5 , num_hidden=10))
        return softmax_Y
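For reference, the weight shapes implied by the comments above (a derivation from the noted MNIST shapes and the FullyConnected layout documented in the docstring, not the original parameter definitions):

# MNIST: P_H2 flattens to 30 * 4 * 4 = 480 features, so
# W3: (120, 480), B3: (120,)
# W4: (64, 120),  B4: (64,)
# W5: (10, 64),   B5: (10,)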
Example #15
def test_ord1():
    a = nd.array(np.random.random((16, 32, 28, 28)))
    # a = nd.array([-1,2]).reshape((1,2,1,1))
    # print("x", a)
    b = nd.array(np.random.random((4, 32, 1, 1)))
    # b = nd.array([3,5,7,9]).reshape((2,2,1,1))
    # print("w", b)
    c = nd.Convolution(a,
                       b,
                       no_bias=True,
                       num_group=1,
                       kernel=(1, 1),
                       num_filter=4)
    # print("conv", c)
    # import group_conv as gconv
    # attrs = {
    # 'num_group': '1',
    # 'kernel': '(1,1)',
    # 'no_bias': 'True',
    # 'num_filter': '2',
    # }
    # nodes = gconv.sc(a, b, attrs)
    # print("nodes", nodes)

    b1 = nd.transpose(b, axes=(1, 0, 2, 3))
    b1 = b1.reshape((128, 1, 1, 1))
    c1 = nd.Convolution(a,
                        b1,
                        no_bias=True,
                        num_group=32,
                        kernel=(1, 1),
                        num_filter=128)
    c1 = c1.reshape((16, 32, 4, 28, 28))
    # print("gconv", c1)
    c1 = nd.sum(c1, axis=1)
    # print("sum", c1)
    utils.check_valid(c, c1, tol=1e-5)
Example #16
def net_lenet(X, verbose=False):
    # First convolutional layer
    h1_conv = nd.Convolution(data=X,
                             weight=lenet_W1,
                             bias=lenet_b1,
                             kernel=lenet_W1.shape[2:],
                             num_filter=lenet_W1.shape[0])
    h1_activation = nd.relu(h1_conv)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    # Second convolutional layer
    h2_conv = nd.Convolution(data=h1,
                             weight=lenet_W2,
                             bias=lenet_b2,
                             kernel=lenet_W2.shape[2:],
                             num_filter=lenet_W2.shape[0])
    h2_activation = nd.relu(h2_conv)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type="max",
                    kernel=(2, 2),
                    stride=(2, 2))
    h2 = nd.flatten(h2)
    # First fully connected layer
    h3_linear = nd.dot(h2, lenet_W3) + lenet_b3
    h3 = nd.relu(h3_linear)
    # Second fully connected layer
    h4_linear = nd.dot(h3, lenet_W4) + lenet_b4
    if verbose:
        print('1st conv block:', h1.shape)
        print('2nd conv block:', h2.shape)
        print('1st dense:', h3.shape)
        print('2nd dense:', h4_linear.shape)
        print('output:', h4_linear)
    return h4_linear
Example #17
def new_flow(Y, nuclei=None, device=mx.cpu()):
    w = nd.ones((1, 1, 3, 3), ctx=device)
    bias = nd.zeros((32, ), ctx=device)
    bias0 = nd.zeros((1, ), ctx=device)
    #w[0,0,2,2] = 0
    #w[0,0,0,0] = 0
    #w[0,0,0,2] = 0
    #w[0,0,2,0] = 0

    w = w / nd.sum(w)
    Ly, Lx = Y.shape
    mu = np.zeros((2, Ly, Lx))
    edge = np.zeros((Ly, Lx))
    unq = np.unique(Y)
    nmask = len(unq) - 1

    _, N = np.unique(Y, return_counts=True)
    R = np.median(N[1:]**.5)
    #print(R)

    for j in range(nmask):
        mask = (Y == unq[j + 1])
        y, x = (Y == unq[j + 1]).nonzero()

        y0 = np.min(y)
        x0 = np.min(x)

        if nuclei is not None:
            M = nuclei[y, x]
            M = M - M.min() + 1e-3
            M = M / M.sum()
            ymed = np.round(np.dot(M, (y - y0))).astype('int32')
            xmed = np.round(np.dot(M, (x - x0))).astype('int32')

        y = y - y0
        x = x - x0
        Ly, Lx = np.max(y) + 1, np.max(x) + 1

        T0 = nd.zeros((1, 1, Ly + 2, Lx + 2), ctx=device)
        T0[0, 0, y + 1, x + 1] = 1

        ff = T0 * (nd.Convolution(
            T0, w, bias0, kernel=(3, 3), pad=(1, 1), num_filter=1) < .95)
        ybound, xbound = np.nonzero(ff[0, 0].asnumpy())
        ds = ((y[:, np.newaxis] + 1 - ybound)**2 +
              (x[:, np.newaxis] + 1 - xbound)**2)**.5
        dmin = np.min(ds, axis=1)
        edge[y + y0, x + x0] = dmin

        #imin = np.argmin( - np.min(ds, axis=1))

        if nuclei is None:
            if False:
                mask = nd.zeros((Ly + 2, Lx + 2), ctx=device)
                mask[y + 1, x + 1] = 1

                T0 = nd.zeros((1, 1, Ly + 2, Lx + 2), ctx=device)
                T0[0, 0, y + 1, x + 1] = 1

                for j in range(Ly + Lx):
                    T0 = nd.Convolution(T0,
                                        w,
                                        bias0,
                                        kernel=(3, 3),
                                        pad=(1, 1),
                                        num_filter=1)
                    T0 = T0 * mask
                    T0 = T0 / T0.mean()
                xy = np.unravel_index(
                    np.argmax((mask * T0[0, 0]).asnumpy().squeeze()),
                    mask.shape)
                ymed, xmed = xy[0] - 1, xy[1] - 1
            elif False:
                ydist = y
                xdist = x
                if len(ydist) > 200:
                    ix = np.random.permutation(len(y))[:200]
                    ydist = ydist[ix]
                    xdist = xdist[ix]
                ds = ((ydist - y[:, np.newaxis])**2 +
                      (xdist - x[:, np.newaxis])**2)**.5
                imin = np.argmin(np.mean(ds, axis=1))
                ymed = y[imin]
                xmed = x[imin]
            else:
                ymed = int(np.median(y))
                xmed = int(np.median(x))
                imin = np.argmin((x - xmed)**2 + (y - ymed)**2)
                xmed = x[imin]
                ymed = y[imin]

        mask = nd.zeros((1, 1, Ly + 2, Lx + 2), ctx=device)
        T0 = nd.zeros((1, 1, Ly + 2, Lx + 2), ctx=device)
        T0[0, 0, ymed + 1, xmed + 1] += 1.
        mask[0, 0, y + 1, x + 1] = 1

        T = T0.copy()
        T = nd.zeros((1, 1, Ly + 2, Lx + 2), ctx=device)  # T0.copy()
        for t in range(Ly + Lx):
            T = T * mask + T0
            T = nd.Convolution(T,
                               w,
                               bias0,
                               kernel=(3, 3),
                               pad=(1, 1),
                               num_filter=1)

        tnp = T[0, 0].asnumpy()

        dx = tnp[1:-1, 2:] - tnp[1:-1, :-2]
        dy = tnp[2:, 1:-1] - tnp[:-2, 1:-1]
        D = np.stack((dx, dy))

        mu[:, y + y0, x + x0] = mu[:, y + y0, x + x0] + D[:, y, x]
    mu = mu / (1e-20 + np.sum(mu**2, axis=0)**.5)

    return mu[0], mu[1], edge
Example #18
def std(X, W, attr):
    return nd.Convolution(X, W, **attr)
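std is a thin pass-through to nd.Convolution; a minimal usage sketch with assumed shapes and attributes, assuming the usual `from mxnet import ndarray as nd` import (not from the original source):

x = nd.random.uniform(shape=(1, 3, 32, 32))
w = nd.random.uniform(shape=(8, 3, 3, 3))
attr = {'kernel': (3, 3), 'num_filter': 8, 'pad': (1, 1), 'stride': (1, 1), 'no_bias': True}
y = std(x, w, attr)  # -> shape (1, 8, 32, 32)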
Example #19
def test_gen(step=2,
             NG=1,
             OPG=4,
             IPG=8,
             N=16,
             H=28,
             W=28,
             KH=1,
             KW=1,
             PH=0,
             PW=0,
             SH=1,
             SW=1,
             DH=1,
             DW=1,
             tol=1e-6,
             th=2**8):
    # temporary restriction: only NG == 1 is supported for now
    assert NG == 1
    YH = (H + 2 * PH - DH * (KH - 1) - 1) // SH + 1
    YW = (W + 2 * PW - DW * (KW - 1) - 1) // SW + 1
    # for reference
    C = IPG * NG
    O = OPG * NG
    X = utils.generate_data_int((N, C, H, W), th=th)
    W = utils.generate_data_int((O, IPG, KH, KW), th=th)
    attrs = {
        'no_bias': True,
        'num_group': NG,
        'kernel': (KH, KW),
        'num_filter': O,
        'stride': (SH, SW),
        'pad': (PH, PW),
        'layout': 'NCHW',
        'dilate': (DH, DW),
    }
    Y = nd.Convolution(X, W, **attrs)

    # transpose and reshape W
    assert IPG % step == 0, "invalid step: {}".format(step)
    # (O,IPG,KH,KW) --transpose--> (IPG,O,KH,KW)
    W1 = nd.transpose(W, axes=(1, 0, 2, 3))
    NIPG = IPG // step
    # (IPG,O,KH,KW) --reshape--> (NIPG,step,O,KH,KW)
    W1 = nd.reshape(W1, shape=(NIPG, step, O, KH, KW))
    # (NIPG,step,O,KH,KW) --transpose--> (NIPG,O,step,KH,KW)
    W1 = nd.transpose(W1, axes=(0, 2, 1, 3, 4))
    NO = NIPG * O
    # (NIPG,O,step,KH,KW) --reshape--> (NO,step,KH,KW)
    W1 = nd.reshape(W1, shape=(NO, step, KH, KW))
    NNG = C // step
    nattrs = {
        'no_bias': True,
        'num_group': NNG,
        'kernel': (KH, KW),
        'num_filter': NO,
        'stride': (SH, SW),
        'pad': (PH, PW),
        'layout': 'NCHW',
        'dilate': (DH, DW),
    }
    Y1 = nd.Convolution(X, W1, **nattrs)
    # (N,NO,YH,YW) --reshape--> (N,NIPG,O,YH,YW)
    Y1 = nd.reshape(Y1, shape=(N, NIPG, O, YH, YW))
    # (N,NIPG,O,YH,YW) --sum--> (N,O,YH,YW)
    Y1 = nd.sum(Y1, axis=1)
    utils.check_valid(Y, Y1, tol=tol)
Example #20
def net_PLB(X,
            params,
            debug=False,
            pool_type='max',
            pool_size=4,
            pool_stride=4):
    [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5, W6, b6, W7, b7] = params
    ########################
    #  Define the computation of the first convolutional layer
    ########################
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=(1, 16),
                             num_filter=64,
                             stride=(1, 1),
                             dilate=(1, 1))
    h1_pooling = nd.Pooling(data=h1_conv,
                            pool_type=pool_type,
                            kernel=(1, pool_size),
                            stride=(1, pool_stride))
    h1 = relu(h1_pooling)
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))

    ########################
    #  Define the computation of the second convolutional layer
    ########################
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=(1, 16),
                             num_filter=128,
                             stride=(1, 1),
                             dilate=(1, 2))
    h2_pooling = nd.Pooling(data=h2_conv,
                            pool_type=pool_type,
                            kernel=(1, pool_size),
                            stride=(1, pool_stride))
    h2 = relu(h2_pooling)
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))

    ########################
    #  Define the computation of the third convolutional layer
    ########################
    h3_conv = nd.Convolution(data=h2,
                             weight=W3,
                             bias=b3,
                             kernel=(1, 16),
                             num_filter=256,
                             stride=(1, 1),
                             dilate=(1, 2))
    h3_pooling = nd.Pooling(data=h3_conv,
                            pool_type=pool_type,
                            kernel=(1, pool_size),
                            stride=(1, pool_stride))
    h3 = relu(h3_pooling)
    if debug:
        print("h3 shape: %s" % (np.array(h3.shape)))

    ########################
    #  Define the computation of the 4th convolutional layer
    ########################
    h4_conv = nd.Convolution(data=h3,
                             weight=W4,
                             bias=b4,
                             kernel=(1, 32),
                             num_filter=512,
                             stride=(1, 1),
                             dilate=(1, 2))
    h4_pooling = nd.Pooling(data=h4_conv,
                            pool_type=pool_type,
                            kernel=(1, pool_size),
                            stride=(1, pool_stride))
    h4 = relu(h4_pooling)
    if debug:
        print("h4 shape: %s" % (np.array(h4.shape)))

    ########################
    #  Flattening h4 so that we can feed it into a fully-connected layer
    ########################
    h5 = nd.flatten(h4)
    if debug:
        print("Flat h5 shape: %s" % (np.array(h5.shape)))

    ########################
    #  Define the computation of the 5th (fully-connected) layer
    ########################
    h6_linear = nd.dot(h5, W5) + b5
    h6 = relu(h6_linear)
    if debug:
        print("h6 shape: %s" % (np.array(h6.shape)))

    ########################
    #  Define the computation of the 6th (fully-connected) layer
    ########################
    h7_linear = nd.dot(h6, W6) + b6
    h7 = relu(h7_linear)
    if debug:
        print("h7 shape: %s" % (np.array(h7.shape)))

    ########################
    #  Define the computation of the output layer
    ########################
    yhat_linear = nd.dot(h7, W7) + b7
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))

    interlayer = [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5, W6, b6, W7, b7]

    return yhat_linear, interlayer
Example #21
def net_PRL(X,
            params,
            debug=False,
            pool_type='max',
            pool_size=4,
            pool_stride=2):
    [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5, W6, b6, W7, b7, W8, b8, W9,
     b9] = params
    drop_prob = 0.5
    ########################
    #  Define the computation of the first convolutional layer
    ########################
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=(1, 64),
                             num_filter=8,
                             stride=(1, 1),
                             dilate=(1, 1))
    h1 = nd.LeakyReLU(h1_conv, act_type='elu')
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))

    ########################
    #  Define the computation of the second convolutional layer
    ########################
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=(1, 32),
                             num_filter=8,
                             stride=(1, 1),
                             dilate=(1, 1))
    h2_pooling = nd.Pooling(data=h2_conv,
                            pool_type=pool_type,
                            kernel=(1, 8),
                            stride=(1, pool_stride))
    h2 = nd.LeakyReLU(h2_pooling, act_type='elu')
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))

    ########################
    #  Define the computation of the third convolutional layer
    ########################
    h3_conv = nd.Convolution(data=h2,
                             weight=W3,
                             bias=b3,
                             kernel=(1, 32),
                             num_filter=16,
                             stride=(1, 1),
                             dilate=(1, 1))
    h3 = nd.LeakyReLU(h3_conv, act_type='elu')
    if debug:
        print("h3 shape: %s" % (np.array(h3.shape)))

    ########################
    #  Define the computation of the 4th convolutional layer
    ########################
    h4_conv = nd.Convolution(data=h3,
                             weight=W4,
                             bias=b4,
                             kernel=(1, 16),
                             num_filter=16,
                             stride=(1, 1),
                             dilate=(1, 1))
    h4_pooling = nd.Pooling(data=h4_conv,
                            pool_type=pool_type,
                            kernel=(1, 6),
                            stride=(1, pool_stride))
    h4 = nd.LeakyReLU(h4_pooling, act_type='elu')
    if debug:
        print("h4 shape: %s" % (np.array(h4.shape)))

    ########################
    #  Define the computation of the 5th convolutional layer
    ########################
    h5_conv = nd.Convolution(data=h4,
                             weight=W5,
                             bias=b5,
                             kernel=(1, 16),
                             num_filter=32,
                             stride=(1, 1),
                             dilate=(1, 1))
    h5 = nd.LeakyReLU(h5_conv, act_type='elu')
    if debug:
        print("h5 shape: %s" % (np.array(h5.shape)))

    ########################
    #  Define the computation of the 6th convolutional layer
    ########################
    h6_conv = nd.Convolution(data=h5,
                             weight=W6,
                             bias=b6,
                             kernel=(1, 16),
                             num_filter=32,
                             stride=(1, 1),
                             dilate=(1, 1))
    h6_pooling = nd.Pooling(data=h6_conv,
                            pool_type=pool_type,
                            kernel=(1, 4),
                            stride=(1, pool_stride))
    h6 = nd.LeakyReLU(h6_pooling, act_type='elu')
    if debug:
        print("h6 shape: %s" % (np.array(h6.shape)))

    ########################
    #  Flattening h6 so that we can feed it into a fully-connected layer
    ########################
    h7 = nd.flatten(h6)
    if debug:
        print("Flat h7 shape: %s" % (np.array(h7.shape)))

    ########################
    #  Define the computation of the 8th (fully-connected) layer
    ########################
    h8_linear = nd.dot(h7, W7) + b7
    h8 = nd.LeakyReLU(h8_linear, act_type='elu')
    if autograd.is_training():
        # apply dropout to the activation output
        h8 = dropout(h8, drop_prob)
    if debug:
        print("h8 shape: %s" % (np.array(h8.shape)))

    ########################
    #  Define the computation of the 9th (fully-connected) layer
    ########################
    h9_linear = nd.dot(h8, W8) + b8
    h9 = nd.LeakyReLU(h9_linear, act_type='elu')
    if autograd.is_training():
        # apply dropout to the activation output
        h9 = dropout(h9, drop_prob)
    if debug:
        print("h9 shape: %s" % (np.array(h9.shape)))

    ########################
    #  Define the computation of the output layer
    ########################
    yhat_linear = nd.dot(h9, W9) + b9
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))

    interlayer = [
        W1, b1, W2, b2, W3, b3, W4, b4, W5, b5, W6, b6, W7, b7, W8, b8, W9, b9
    ]

    return yhat_linear, interlayer
Example #22
import mxnet as mx
from mxnet.gluon import nn
from mxnet import ndarray as nd

# Convolution layer
# Input/output data format: batch * channel * height * width
# Weight format: output_channels * in_channels * height * width

w = nd.arange(4).reshape((1, 1, 2, 2))
b = nd.array([1])

data = nd.arange(9).reshape((1, 1, 3, 3))

# Convolution operation
out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[0])
print('input:', data)
print('weight:', w)
print('bias:', b)
print('output:', out)

# Stride and padding
out = nd.Convolution(data,
                     w,
                     b,
                     kernel=w.shape[2:],
                     num_filter=w.shape[0],
                     stride=(2, 2),
                     pad=(1, 1))

print('output:', out)
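Using the usual output-size formula, out = (H + 2*pad - k) // stride + 1, the two calls above give (derived from the shapes shown, not copied from a run):

# first call:  3x3 input, 2x2 kernel, stride 1, no padding -> (3 - 2) // 1 + 1 = 2, output shape (1, 1, 2, 2)
# second call: stride (2, 2), pad (1, 1)                   -> (3 + 2 - 2) // 2 + 1 = 2, output shape (1, 1, 2, 2)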
Example #23
    def network(
        X,
        drop_rate=0.0
    ):  # formula: output_size = ((input - kernel + 2*padding) / stride) + 1
        #data size
        # MNIST,FashionMNIST = (batch size , 1 , 28 ,  28)
        # CIFAR = (batch size , 3 , 32 ,  32)

        # Note: the built-in BatchNorm's moving_mean / moving_var updates do not work here.
        C_H1 = nd.Activation(
            data=nd.BatchNorm(data=nd.Convolution(data=X,
                                                  weight=W1,
                                                  bias=B1,
                                                  kernel=(3, 3),
                                                  stride=(1, 1),
                                                  num_filter=60),
                              gamma=gamma1,
                              beta=beta1,
                              moving_mean=ma1,
                              moving_var=mv1,
                              momentum=0.9,
                              fix_gamma=False,
                              use_global_stats=True),
            act_type="relu"
        )  # MNIST : result = ( batch size , 60 , 26 , 26) , CIFAR10 : result = ( batch size , 60 , 30 , 30)
        P_H1 = nd.Pooling(
            data=C_H1, pool_type="avg", kernel=(2, 2), stride=(2, 2)
        )  # MNIST : result = (batch size , 60 , 13 , 13) , CIFAR10 : result = (batch size , 60 , 15 , 15)
        C_H2 = nd.Activation(
            data=nd.BatchNorm(data=nd.Convolution(data=P_H1,
                                                  weight=W2,
                                                  bias=B2,
                                                  kernel=(6, 6),
                                                  stride=(1, 1),
                                                  num_filter=30),
                              gamma=gamma2,
                              beta=beta2,
                              moving_mean=ma2,
                              moving_var=mv2,
                              momentum=0.9,
                              fix_gamma=False,
                              use_global_stats=True),
            act_type="relu"
        )  # MNIST :  result = ( batch size , 30 , 8 , 8), CIFAR10 :  result = ( batch size , 30 , 10 , 10)
        P_H2 = nd.Pooling(
            data=C_H2, pool_type="avg", kernel=(2, 2), stride=(2, 2)
        )  # MNIST : result = (batch size , 30 , 4 , 4) , CIFAR10 : result = (batch size , 30 , 5 , 5)
        P_H2 = nd.flatten(data=P_H2)
        '''FullyConnected parameter
        • data: (batch_size, input_dim)
        • weight: (num_hidden, input_dim)
        • bias: (num_hidden,)
        • out: (batch_size, num_hidden)
        '''
        F_H1 = nd.Activation(nd.BatchNorm(data=nd.FullyConnected(
            data=P_H2, weight=W3, bias=B3, num_hidden=120),
                                          gamma=gamma3,
                                          beta=beta3,
                                          moving_mean=ma3,
                                          moving_var=mv3,
                                          momentum=0.9,
                                          fix_gamma=False,
                                          use_global_stats=True),
                             act_type="relu")
        F_H1 = nd.Dropout(data=F_H1, p=drop_rate)
        F_H2 = nd.Activation(nd.BatchNorm(data=nd.FullyConnected(
            data=F_H1, weight=W4, bias=B4, num_hidden=64),
                                          gamma=gamma4,
                                          beta=beta4,
                                          moving_mean=ma4,
                                          moving_var=mv4,
                                          momentum=0.9,
                                          fix_gamma=False,
                                          use_global_stats=True),
                             act_type="relu")
        F_H2 = nd.Dropout(data=F_H2, p=drop_rate)
        #softmax_Y = nd.softmax(nd.FullyConnected(data=F_H2 ,weight=W5 , bias=B5 , num_hidden=10))
        out = nd.FullyConnected(data=F_H2, weight=W5, bias=B5, num_hidden=10)
        return out
Example #24
def net(X,
        params,
        debug=False,
        pool_type='avg',
        pool_size=16,
        pool_stride=2,
        act_type='relu',
        dilate_size=1,
        nf=1):
    [W1, b1, W2, b2, W3, b3, W4, b4, W5, b5] = params
    ########################
    #  Define the computation of the first convolutional layer
    ########################
    h1_conv = nd.Convolution(data=X,
                             weight=W1,
                             bias=b1,
                             kernel=(1, 16),
                             num_filter=int(16 * nf),
                             stride=(1, 1),
                             dilate=(1, dilate_size))
    h1_activation = activation(h1_conv, act_type=act_type)
    h1 = nd.Pooling(data=h1_activation,
                    pool_type=pool_type,
                    kernel=(1, pool_size),
                    stride=(1, pool_stride))
    if debug:
        print("h1 shape: %s" % (np.array(h1.shape)))

    ########################
    #  Define the computation of the second convolutional layer
    ########################
    h2_conv = nd.Convolution(data=h1,
                             weight=W2,
                             bias=b2,
                             kernel=(1, 8),
                             num_filter=int(32 * nf),
                             stride=(1, 1),
                             dilate=(1, dilate_size))
    h2_activation = activation(h2_conv, act_type=act_type)
    h2 = nd.Pooling(data=h2_activation,
                    pool_type=pool_type,
                    kernel=(1, pool_size),
                    stride=(1, pool_stride))
    if debug:
        print("h2 shape: %s" % (np.array(h2.shape)))

    ########################
    #  Define the computation of the third convolutional layer
    ########################
    h3_conv = nd.Convolution(data=h2,
                             weight=W3,
                             bias=b3,
                             kernel=(1, 8),
                             num_filter=int(64 * nf),
                             stride=(1, 1),
                             dilate=(1, dilate_size))
    h3_activation = activation(h3_conv, act_type=act_type)
    h3 = nd.Pooling(data=h3_activation,
                    pool_type=pool_type,
                    kernel=(1, pool_size),
                    stride=(1, pool_stride))
    if debug:
        print("h3 shape: %s" % (np.array(h3.shape)))

    ########################
    #  Flattening h3 so that we can feed it into a fully-connected layer
    ########################
    h4 = nd.flatten(h3)
    if debug:
        print("Flat h4 shape: %s" % (np.array(h4.shape)))

    ########################
    #  Define the computation of the 4th (fully-connected) layer
    ########################
    h5_linear = nd.dot(h4, W4) + b4
    h5 = activation(h5_linear, act_type=act_type)
    if autograd.is_training():
        # apply dropout to the activation output
        h5 = dropout(h5, drop_prob)
    if debug:
        print("h5 shape: %s" % (np.array(h5.shape)))
        print("Dropout: ", drop_prob)

    ########################
    #  Define the computation of the output layer
    ########################
    yhat_linear = nd.dot(h5, W5) + b5
    if debug:
        print("yhat_linear shape: %s" % (np.array(yhat_linear.shape)))

    interlayer = [h1, h2, h3, h4, h5]

    return yhat_linear, interlayer
Example #25
np.random.seed(30)

# w = nd.array(np.random.rand(2, 3, 3, 3))
w = nd.load(
    '/home/yjr/MxNet_Codes/gluon-cv/scripts/gloun2TF/mxnet_weights/resnet50_v1b-0ecdba34.params'
)['conv1.weight']  # [64, 3, 7, 7]
# w = nd.arange(9*2).reshape((2, 1, 3, 3))
data = nd.array(np.random.rand(1, 3, 224, 224))
# data, _ = mxnet_process_img('../demo_img/person.jpg')
# data = nd.arange(6*6).reshape((1, 1, 6, 6))

# Convolution operation
out = nd.Convolution(data,
                     w,
                     no_bias=True,
                     kernel=(7, 7),
                     stride=(2, 2),
                     num_filter=64,
                     pad=(3, 3))


def tf_conv(data, w):

    data = tf.constant(data.asnumpy())
    data = tf.pad(data, paddings=[[0, 0], [0, 0], [3, 3], [3, 3]])
    tf_out = slim.conv2d(data,
                         num_outputs=64,
                         kernel_size=[7, 7],
                         padding='VALID',
                         stride=2,
                         biases_initializer=None,
def fwd_conv2d(*data, **attrs):
    assert len(data) == 2
    out = nd.Convolution(*data, **attrs)
    return out
Example #27
import mxnet as mx