Code Example #1
# Assumed imports for these examples: import mxnet as mx;
# from mxnet import symbol as sym; from mxnet import module as mod
def prepare_model(self):
    """Build a 5-layer MLP regressor trained with an MAE loss (class method)."""
    l1_dense = sym.FullyConnected(data=sym.Variable("data"),
                                  num_hidden=80,
                                  no_bias=False,
                                  name="l1_dense")
    l2_dense = sym.FullyConnected(data=l1_dense,
                                  num_hidden=120,
                                  no_bias=True,
                                  name="l2_dense")
    l2_activation = sym.Activation(data=l2_dense,
                                   act_type="relu",
                                   name="l2_activation")
    l3_dense = sym.FullyConnected(data=l2_activation,
                                  num_hidden=60,
                                  no_bias=True,
                                  name="l3_dense")
    l3_activation = sym.Activation(data=l3_dense,
                                   act_type="relu",
                                   name="l3_activation")
    l4_dense = sym.FullyConnected(data=l3_activation,
                                  num_hidden=20,
                                  no_bias=True,
                                  name="l4_dense")
    l4_activation = sym.Activation(data=l4_dense,
                                   act_type="relu",
                                   name="l4_activation")
    self.l5_dense = sym.FullyConnected(data=l4_activation,
                                       num_hidden=1,
                                       no_bias=False,
                                       name="l5_dense")
    output = sym.MAERegressionOutput(data=self.l5_dense,
                                     label=sym.Variable("target"))
    self.train_module = mod.Module(symbol=output,
                                   data_names=["data"],
                                   label_names=["target"])
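
A minimal usage sketch, not from the original source: it assumes the imports noted above and that "model" is an instance of the surrounding class; X, y, and all hyperparameters are made-up placeholders. Note that the label key must match the "target" variable the symbol declares.

# Hypothetical usage -- X, y, batch size, and optimizer settings are assumptions.
import numpy as np

X = np.random.rand(1000, 30).astype('float32')   # 1000 samples, 30 features
y = np.random.rand(1000, 1).astype('float32')
train_iter = mx.io.NDArrayIter(data={'data': X},
                               label={'target': y},
                               batch_size=50)
model.prepare_model()        # model: instance of the class defining prepare_model
model.train_module.fit(train_iter,
                       eval_metric='mae',
                       optimizer='adam',
                       optimizer_params={'learning_rate': 1e-3},
                       num_epoch=10)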
Code Example #2
def get_symbol(num_classes=10, flag='training', add_stn=False, **kwargs):
    data = sym.Variable('data')
    if add_stn:
        data = sym.SpatialTransformer(data=data,
                                      loc=get_loc(data),
                                      target_shape=(28, 28),
                                      transform_type="affine",
                                      sampler_type="bilinear")
    # first conv
    conv1 = sym.Convolution(data=data, kernel=(5, 5), num_filter=10)
    relu1 = sym.Activation(data=conv1, act_type="relu")
    pool1 = sym.Pooling(data=relu1,
                        pool_type="max",
                        kernel=(2, 2),
                        stride=(2, 2))
    # second conv
    conv2 = sym.Convolution(data=pool1, kernel=(5, 5), num_filter=20)
    relu2 = sym.Activation(data=conv2, act_type="relu")
    pool2 = sym.Pooling(data=relu2,
                        pool_type="max",
                        kernel=(2, 2),
                        stride=(2, 2))

    drop1 = sym.Dropout(data=pool2)
    # first fullc
    flatten = sym.Flatten(data=drop1)
    fc1 = sym.FullyConnected(data=flatten, num_hidden=50)
    relu3 = sym.Activation(data=fc1, act_type="relu")
    # second fullc
    drop2 = sym.Dropout(data=relu3, mode=flag)  # mode: 'training' or 'always'
    fc2 = sym.FullyConnected(data=drop2, num_hidden=num_classes)
    # loss
    lenet = sym.SoftmaxOutput(data=fc2, name='softmax')
    return lenet
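
A quick way to sanity-check the wiring is shape inference. This sketch is not from the original source; the batch size and MNIST-style input shape are assumptions. With add_stn=True the function additionally needs get_loc from Code Example #3 below.

# Hypothetical shape check -- batch size and input shape are assumptions.
net = get_symbol(num_classes=10)
arg_shapes, out_shapes, _ = net.infer_shape(data=(64, 1, 28, 28))
print(out_shapes)   # [(64, 10)]; SoftmaxOutput adds an implicit 'softmax_label'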
Code Example #3
def get_loc(data, attr={'lr_mult': '0.01'}):
    """
    the localisation network in lenet-stn, it will increase acc about more than 1%,
    when num-epoch >=15
    """
    loc = sym.Convolution(data=data,
                          num_filter=8,
                          kernel=(7, 7),
                          stride=(1, 1))
    loc = sym.Activation(data=loc, act_type='relu')
    loc = sym.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
    loc = sym.Convolution(data=loc,
                          num_filter=10,
                          kernel=(5, 5),
                          stride=(1, 1))
    loc = sym.Activation(data=loc, act_type='relu')
    loc = sym.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')

    loc = sym.FullyConnected(data=loc,
                             num_hidden=32,
                             name="stn_loc_fc1",
                             attr=attr)
    loc = sym.Activation(data=loc, act_type='relu')
    # loc = sym.Flatten(data=loc)  # not needed: FullyConnected flattens its input
    loc = sym.FullyConnected(data=loc,
                             num_hidden=6,
                             name="stn_loc_fc2",
                             attr=attr)
    return loc
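
Putting Code Examples #2 and #3 together, a hedged sketch of the STN-enabled graph; the shapes are assumptions, and the check simply confirms the graph builds and infers shapes end to end.

# Hypothetical check of the STN-enabled graph -- shapes are assumptions.
stn_net = get_symbol(num_classes=10, add_stn=True)
print('stn_loc_fc2_weight' in stn_net.list_arguments())    # True
_, out_shapes, _ = stn_net.infer_shape(data=(64, 1, 28, 28))
print(out_shapes)   # [(64, 10)]; the transformer resamples inputs back to 28x28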
Code Example #4
def hybrid_fusionFMaps(lMap, sMap, neighbor_scale=2, method='upconv'):
    # lMap/sMap stand for large/small feature maps
    # method: 'upconv' (not implemented) or 'bilinear'

    if method == 'upconv':
        # Learned upsampling is not implemented yet; everything below this
        # raise is an unreachable placeholder (upconv_ksize is never defined).
        raise NotImplementedError("upconv fusion is not implemented yet")
        upconver = sym.Deconvolution(data=sMap,
                                     kernel=upconv_ksize,
                                     num_filter=512,
                                     stride=(1, 1),
                                     weight=sym.random.normal(
                                         0, 1, (512, 512, 3, 3)))
        upconver = sym.Activation(data=upconver, act_type='relu')
        upconv_sMap = sym.BatchNorm(data=upconver,
                                    gamma=sym.random_gamma(alpha=9,
                                                           beta=0.5,
                                                           shape=(2, 2)))
        # upconver.initialize(ctx=mx.gpu())  # how to init? should I make the params trainable?
        # TODO: Modify this. Figure out a way to deal with size problem brought by pooling
        upconv_sMap = sym.UpSampling(sMap, scale=2, sample_type="nearest")
    elif method == 'bilinear':
        # Nearest-neighbour sampling is used here despite the branch name.
        upconv_sMap = sym.UpSampling(sMap,
                                     scale=neighbor_scale,
                                     sample_type="nearest")

    else:
        raise ValueError(
            "ERROR! [jcy checkpoint]: Unexpected enlarging method.")

    res = (lMap + upconv_sMap) / 2  # add large fmap with the smaller one
    # res = sym.broadcast_div(res, sym.max(res))
    return res
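
A small shape check for the working 'bilinear' branch (a sketch, not from the original source; the channel count and spatial sizes are assumptions): the small map is upsampled by neighbor_scale, then averaged with the large one.

# Hypothetical fusion check -- channel count and spatial sizes are assumptions.
lMap = sym.Variable('lMap')    # large feature map, e.g. (1, 512, 16, 16)
sMap = sym.Variable('sMap')    # small feature map, e.g. (1, 512, 8, 8)
fused = hybrid_fusionFMaps(lMap, sMap, neighbor_scale=2, method='bilinear')
_, out_shapes, _ = fused.infer_shape(lMap=(1, 512, 16, 16),
                                     sMap=(1, 512, 8, 8))
print(out_shapes)   # [(1, 512, 16, 16)]; sMap upsampled x2, then averaged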
Code Example #5
def _mish(x, name):
    """
    mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))
    :param x:
    :param name:
    :return:
    """
    # MXNet's 'softrelu' activation is softplus: ln(1 + e^x)
    return sym.elemwise_mul(x,
                            sym.tanh(sym.Activation(x, act_type='softrelu')),
                            name=f'{name}_mish')
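
The symbol can be evaluated directly for a spot check; this sketch is not from the original source and the input values are arbitrary.

# Hypothetical numeric check via Symbol.eval -- input values are assumptions.
x = sym.Variable('x')
out = _mish(x, 'demo').eval(x=mx.nd.array([-1.0, 0.0, 1.0]))[0]
print(out)   # approx. [-0.3034, 0.0, 0.8651]; mish(0) = 0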
Code Example #6
from typing import Optional

def _activate(x, act_type: Optional[str] = None, name: Optional[str] = None):
    """
    Apply an activation to x.
    :param x:
    :param act_type:    {None, 'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh', 'swish', 'mish'}
    :param name:
    :return:
    """
    if name is None:
        name = 'activation'
    if act_type is None:
        return x
    elif act_type == 'swish':
        return _swish(x, name)
    elif act_type == 'mish':
        return _mish(x, name)
    else:
        return sym.Activation(x, act_type=act_type, name=f'{name}_{act_type}')
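
A hedged usage sketch of the dispatcher; the layer name and width are made up, and _swish is defined in Code Example #8 below.

# Hypothetical dispatch example -- layer name and width are assumptions.
h = sym.FullyConnected(data=sym.Variable('data'), num_hidden=64, name='fc1')
h = _activate(h, act_type='mish', name='fc1')   # names the result 'fc1_mish'
h = _activate(h, act_type=None)                 # identity: returns h unchanged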
Code Example #7
File: ssd_sym.py  Project: pigtamer/myssd
def getConvLayer(data,
                 name,
                 num_filter,
                 kernel_size,
                 pad=(0, 0),
                 stride=(1, 1),
                 activation='relu',
                 bn=True):
    """
    return a conv layer with act, batchnorm, and pooling layers, if any
    :return:
    """
    convunit = sym.Convolution(data=data,
                               num_filter=num_filter,
                               pad=pad,
                               stride=stride,
                               kernel=kernel_size,
                               name=name + "conv")
    if bn:
        convunit = sym.BatchNorm(data=convunit, name=name + "bn")
    convunit = sym.Activation(data=convunit,
                              act_type=activation,
                              name=name + "act")
    return convunit
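
A shape check for the block builder; this sketch is not from the original source, and the input shape and filter count are assumptions.

# Hypothetical block check -- input shape and filter count are assumptions.
data = sym.Variable('data')
blk = getConvLayer(data, 'b1_', num_filter=32, kernel_size=(3, 3), pad=(1, 1))
_, out_shapes, _ = blk.infer_shape(data=(1, 3, 64, 64))
print(out_shapes)   # [(1, 32, 64, 64)]; 3x3 kernel with pad (1, 1), stride (1, 1)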
Code Example #8
def _swish(x, name):
    """swish(x) = x * sigmoid(x)"""
    return sym.elemwise_mul(x,
                            sym.Activation(x, act_type='sigmoid'),
                            name=f'{name}_swish')
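
The same eval-based spot check works here (a sketch; the input values are arbitrary):

# Hypothetical numeric check -- input values are assumptions.
x = sym.Variable('x')
out = _swish(x, 'demo').eval(x=mx.nd.array([0.0, 1.0]))[0]
print(out)   # approx. [0.0, 0.7311]; swish(1) = sigmoid(1)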