Example #1
# All examples on this page assume the MXNet Symbol API:
import mxnet as mx
from mxnet import symbol as sym


def get_symbol(num_classes=10, flag='training', add_stn=False, **kwargs):
    data = sym.Variable('data')
    if add_stn:
        data = sym.SpatialTransformer(data=data,
                                      loc=get_loc(data),
                                      target_shape=(28, 28),
                                      transform_type="affine",
                                      sampler_type="bilinear")
    # first conv
    conv1 = sym.Convolution(data=data, kernel=(5, 5), num_filter=10)
    relu1 = sym.Activation(data=conv1, act_type="relu")
    pool1 = sym.Pooling(data=relu1,
                        pool_type="max",
                        kernel=(2, 2),
                        stride=(2, 2))
    # second conv
    conv2 = sym.Convolution(data=pool1, kernel=(5, 5), num_filter=20)
    relu2 = sym.Activation(data=conv2, act_type="relu")
    pool2 = sym.Pooling(data=relu2,
                        pool_type="max",
                        kernel=(2, 2),
                        stride=(2, 2))

    drop1 = sym.Dropout(data=pool2)
    # first fullc
    flatten = sym.Flatten(data=drop1)
    fc1 = sym.FullyConnected(data=flatten, num_hidden=50)
    relu3 = sym.Activation(data=fc1, act_type="relu")
    # second fullc
    drop2 = sym.Dropout(data=relu3, mode=flag)  # mode: 'training' or 'always'
    fc2 = sym.FullyConnected(data=drop2, num_hidden=num_classes)
    # loss
    lenet = sym.SoftmaxOutput(data=fc2, name='softmax')
    return lenet
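
A quick sanity check is shape inference. A minimal sketch, assuming the imports above ((1, 1, 28, 28) is the single-channel MNIST layout this LeNet expects):

net = get_symbol(num_classes=10)
_, out_shapes, _ = net.infer_shape(data=(1, 1, 28, 28))
print(out_shapes)  # [(1, 10)] -- one score per class
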
Example #2
def get_loc(data, attr={'lr_mult': '0.01'}):
    """
    the localisation network in lenet-stn, it will increase acc about more than 1%,
    when num-epoch >=15
    """
    loc = sym.Convolution(data=data,
                          num_filter=8,
                          kernel=(7, 7),
                          stride=(1, 1))
    loc = sym.Activation(data=loc, act_type='relu')
    loc = sym.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
    loc = sym.Convolution(data=loc,
                          num_filter=10,
                          kernel=(5, 5),
                          stride=(1, 1))
    loc = sym.Activation(data=loc, act_type='relu')
    loc = sym.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')

    loc = sym.FullyConnected(data=loc,
                             num_hidden=32,
                             name="stn_loc_fc1",
                             attr=attr)
    loc = sym.Activation(data=loc, act_type='relu')
    # loc = sym.Flatten(data=loc)  # not needed: FullyConnected flattens its input by default
    loc = sym.FullyConnected(data=loc,
                             num_hidden=6,
                             name="stn_loc_fc2",
                             attr=attr)
    return loc
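
The six outputs are the flattened 2x3 affine matrix consumed by the SpatialTransformer in Example #1. A minimal shape check (sketch, same imports assumed):

loc = get_loc(sym.Variable('data'))
_, out_shapes, _ = loc.infer_shape(data=(1, 1, 28, 28))
print(out_shapes)  # [(1, 6)] -- one 2x3 affine matrix per sample
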
Example #3
def _conv(x,
          name,
          channels,
          kernel=1,
          stride=1,
          pad=0,
          dilate=1,
          groups=1,
          no_bias=False,
          norm_layer=None,
          norm_kwargs=None,
          act_type=None):
    # Conv (Xavier init) -> optional norm_layer -> _activate; the int
    # geometry args (kernel/stride/pad/dilate) expand to square tuples.
    if norm_kwargs is None:
        norm_kwargs = {}
    y = sym.Convolution(x,
                        num_filter=channels,
                        kernel=(kernel, kernel),
                        stride=(stride, stride),
                        pad=(pad, pad),
                        dilate=(dilate, dilate),
                        num_group=groups,
                        no_bias=no_bias,
                        name=f'{name}',
                        attr={
                            '__init__':
                            mx.init.Xavier(rnd_type='uniform',
                                           factor_type='in',
                                           magnitude=1.)
                        })
    if norm_layer is not None:
        y = norm_layer(y, name=f'{name}_bn', **norm_kwargs)
    y = _activate(y, act_type, name)
    return y
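
_conv (and _deformable_conv and _cbam below) call an _activate helper that this page does not show. A minimal sketch consistent with the call sites (hypothetical; the source project's version may differ):

def _activate(x, act_type, name):
    # Hypothetical helper: act_type=None means "no activation".
    if act_type is None:
        return x
    return sym.Activation(x, act_type=act_type, name=f'{name}_{act_type}')
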
Example #4
File: ssd_sym.py  Project: pigtamer/myssd
def getSSD():
    """
    Generate the SSD network symbol (an unfinished stub in the source).
    :return: the symbol built so far
    """
    net = sym.Variable('data')
    # kernel is required by sym.Convolution; (3, 3) is an assumed
    # placeholder -- the original stub omitted it.
    net = sym.Convolution(data=net, num_filter=128, kernel=(3, 3))
    return net
Example #5
def _deformable_conv(x,
                     name,
                     channels,
                     kernel=1,
                     stride=1,
                     pad=0,
                     groups=1,
                     dilate=1,
                     no_bias=False,
                     num_deformable_group=1,
                     offset_use_bias=True,
                     norm_layer=None,
                     norm_kwargs=None,
                     act_type=None):
    # Deformable conv block: a plain conv predicts per-tap (x, y)
    # offsets, which contrib.DeformableConvolution then samples with.
    if norm_kwargs is None:
        norm_kwargs = {}
    offset_channels = 2 * (kernel**2) * num_deformable_group
    offset = sym.Convolution(x,
                             num_filter=offset_channels,
                             kernel=(kernel, kernel),
                             stride=(stride, stride),
                             pad=(pad, pad),
                             num_group=groups,
                             dilate=(dilate, dilate),
                             no_bias=not offset_use_bias,
                             cudnn_off=True,
                             name=f'{name}_offset')
    y = sym.contrib.DeformableConvolution(
        x,
        offset=offset,
        num_filter=channels,
        kernel=(kernel, kernel),
        stride=(stride, stride),
        pad=(pad, pad),
        num_group=groups,
        dilate=(dilate, dilate),
        no_bias=no_bias,
        num_deformable_group=num_deformable_group,
        name=f'{name}',
        attr={
            '__init__':
            mx.init.Xavier(rnd_type='uniform', factor_type='in', magnitude=1.)
        })
    if norm_layer is not None:
        y = norm_layer(y, name=f'{name}_bn', **norm_kwargs)
    y = _activate(y, act_type, name)
    return y
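
Worked example for the offset branch: with kernel=3 and num_deformable_group=1, the offset convolution predicts 2 * 3**2 * 1 = 18 channels, i.e. an (x, y) offset for each of the nine kernel taps.
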
Example #6
File: ssd_sym.py  Project: pigtamer/myssd
def getConvLayer(data,
                 name,
                 num_filter,
                 kernel_size,
                 pad=(0, 0),
                 stride=(1, 1),
                 activation='relu',
                 bn=True):
    """
    return a conv layer with act, batchnorm, and pooling layers, if any
    :return:
    """
    convunit = sym.Convolution(data=data,
                               num_filter=num_filter,
                               pad=pad,
                               stride=stride,
                               kernel=kernel_size,
                               name=name + "conv")
    if bn:
        convunit = sym.BatchNorm(data=convunit, name=name + "bn")
    convunit = sym.Activation(data=convunit,
                              act_type=activation,
                              name=name + "act")
    return convunit
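
Example usage (a sketch; the 'conv1_' name prefix is illustrative):

data = sym.Variable('data')
conv1 = getConvLayer(data, 'conv1_', num_filter=64, kernel_size=(3, 3),
                     pad=(1, 1), stride=(1, 1))
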
Example #7
def _cbam(x, name, channels, reduction, act_type='relu', spatial_dilate=0):
    """
    启用 CBAM
    :param x:               输入
    :param name:            operator name
    :param channels:        输出 channels
    :param reduction:       MLP reduction
    :param act_type:        MLP activation
    :param spatial_dilate:  spatial_dilate > 0,对于 spatial 新添加一个 dilate conv
    :return:
    """
    # =============================== channel
    # Pooling [N, C, 1, 1]
    max_pool = sym.Pooling(x,
                           pool_type='max',
                           global_pool=True,
                           name=f'{name}_max')
    avg_pool = sym.Pooling(x,
                           pool_type='avg',
                           global_pool=True,
                           name=f'{name}_avg')

    # MLP FC1 [N, C // reduction, 1, 1]
    mlp_fc1_weight = sym.Variable(f'{name}_mlp_fc1_weight',
                                  shape=(channels // reduction, 0, 1, 1))
    mlp_fc1_bias = sym.Variable(f'{name}_mlp_fc1_bias',
                                shape=(channels // reduction, ),
                                init=mx.init.Constant(0.))
    max_pool = sym.Convolution(max_pool,
                               num_filter=channels // reduction,
                               kernel=(1, 1),
                               stride=(1, 1),
                               pad=(0, 0),
                               weight=mlp_fc1_weight,
                               bias=mlp_fc1_bias,
                               name=f'{name}_max_fc1')
    avg_pool = sym.Convolution(avg_pool,
                               num_filter=channels // reduction,
                               kernel=(1, 1),
                               stride=(1, 1),
                               pad=(0, 0),
                               weight=mlp_fc1_weight,
                               bias=mlp_fc1_bias,
                               name=f'{name}_avg_fc1')
    max_pool = _activate(max_pool, act_type, name=f'{name}_max_fc1')
    avg_pool = _activate(avg_pool, act_type, name=f'{name}_avg_fc1')

    # MLP FC2 [N, C, 1, 1]
    mlp_fc2_weight = sym.Variable(f'{name}_mlp_fc2_weight',
                                  shape=(channels, 0, 1, 1))
    mlp_fc2_bias = sym.Variable(f'{name}_mlp_fc2_bias',
                                shape=(channels, ),
                                init=mx.init.Constant(0.))
    max_pool = sym.Convolution(max_pool,
                               num_filter=channels,
                               kernel=(1, 1),
                               stride=(1, 1),
                               pad=(0, 0),
                               weight=mlp_fc2_weight,
                               bias=mlp_fc2_bias,
                               name=f'{name}_max_fc2')
    avg_pool = sym.Convolution(avg_pool,
                               num_filter=channels,
                               kernel=(1, 1),
                               stride=(1, 1),
                               pad=(0, 0),
                               weight=mlp_fc2_weight,
                               bias=mlp_fc2_bias,
                               name=f'{name}_avg_fc2')
    channel_attention = _activate(max_pool + avg_pool,
                                  'sigmoid',
                                  name=f'{name}_channel')
    y = sym.broadcast_mul(x, channel_attention, name=f'{name}_channel_out')

    # =============================== spatial
    max_spatial = sym.max(y, axis=1, keepdims=True, name=f'{name}_max_spatial')
    avg_spatial = sym.mean(y,
                           axis=1,
                           keepdims=True,
                           name=f'{name}_avg_spatial')
    spatial = sym.Concat(max_spatial,
                         avg_spatial,
                         dim=1,
                         name=f'{name}_spatial_concat')
    if spatial_dilate > 0:
        dilate_spatial = _conv(y,
                               f'{name}_spatial_dilate{spatial_dilate}',
                               1,
                               kernel=3,
                               stride=1,
                               pad=spatial_dilate,
                               dilate=spatial_dilate,
                               no_bias=False)
        spatial = sym.Concat(spatial,
                             dilate_spatial,
                             dim=1,
                             name=f'{name}_spatial_concat_dilate')
    spatial_attention = _conv(spatial,
                              f'{name}_spatial_conv',
                              1,
                              kernel=7,
                              stride=1,
                              pad=3,
                              groups=1,
                              act_type='sigmoid')
    y = sym.broadcast_mul(y, spatial_attention, name=f'{name}_spatial_out')
    return y
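
CBAM is shape-preserving: the channel attention is [N, C, 1, 1], the spatial attention is [N, 1, H, W], and both are applied with broadcast_mul. A quick check (sketch, reusing the hypothetical _activate above):

x = sym.Variable('data')
y = _cbam(x, 'cbam0', channels=64, reduction=16)
_, out_shapes, _ = y.infer_shape(data=(1, 64, 32, 32))
print(out_shapes)  # [(1, 64, 32, 32)] -- same shape as the input
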
Example #8
File: ssd_sym.py  Project: pigtamer/myssd
def getPredBranch():
    # Stub: sym.Convolution needs data, kernel, and num_filter; the
    # values below are assumed placeholders to make the stub valid.
    data = sym.Variable('data')
    net = sym.Convolution(data=data, num_filter=128, kernel=(3, 3))
    return net