Example #1
 def __init__(self, channels, reduction_ratio=16, **kwargs):
     super(ChannelGate, self).__init__(**kwargs)
     with self.name_scope():
         self.avg_pool = nn.GlobalAvgPool2D()
         self.max_pool = nn.GlobalMaxPool2D()
         self.mlp = MLP(channels=channels, reduction_ratio=reduction_ratio)
         self.sigmoid = nn.Activation("sigmoid")
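A plausible hybrid_forward for this gate, in the spirit of CBAM's channel attention (a sketch only; it assumes the MLP maps each pooled descriptor to an (N, C) logit tensor, which is not shown in the snippet):

 def hybrid_forward(self, F, x):
     # Shared MLP over both pooled descriptors; the sigmoid gate
     # rescales each channel of the input.
     att = self.sigmoid(self.mlp(self.avg_pool(x)) + self.mlp(self.max_pool(x)))
     return F.broadcast_mul(x, att.expand_dims(axis=2).expand_dims(axis=3))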
Example #2
def get_blk(i):
    if i == 0:
        return base_net()
    elif i==4:
        return nn.GlobalMaxPool2D()
    else:
        return down_sample_blk(128)
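The get_blk variants here and in Examples #5 through #9 and #15 all follow the same SSD-tutorial pattern: index 0 builds the backbone, indices 1-3 each halve the feature map, and the last index collapses it to 1x1 with global max pooling. A usage sketch, assuming base_net and down_sample_blk are defined as in that tutorial:

net = nn.Sequential()
for i in range(5):
    net.add(get_blk(i))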
Example #3
 def __init__(self, num_channels, ratio, **kwargs):
     super(CAM, self).__init__(**kwargs)
     with self.name_scope():
         self.avg_pool = nn.GlobalAvgPool2D()
         self.max_pool = nn.GlobalMaxPool2D()
         self.conv1 = nn.Conv2D(num_channels // ratio, 1, use_bias=False)
         self.conv2 = nn.Conv2D(num_channels, 1, use_bias=False)
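A plausible hybrid_forward for this module (a sketch; conv1/conv2 act as a shared 1x1-conv bottleneck over both pooled descriptors, as in CBAM's channel gate):

 def hybrid_forward(self, F, x):
     # Shared bottleneck applied to both pooled descriptors,
     # summed and squashed into a per-channel gate.
     avg_out = self.conv2(F.relu(self.conv1(self.avg_pool(x))))
     max_out = self.conv2(F.relu(self.conv1(self.max_pool(x))))
     return F.broadcast_mul(x, F.sigmoid(avg_out + max_out))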
Example #4
 def __init__(self,
              num_classes,
              kernel_first=11,
              stride_first=4,
              padding_first=5,
              fc_size=4096):
     super(AlexNet, self).__init__()
     self.layers = nn.Sequential()
     self.layers.add(
         nn.Conv2D(96,
                   kernel_first,
                   strides=stride_first,
                   padding=padding_first,
                   activation="relu"), LRN(), nn.MaxPool2D(3, 2, padding=1),
         nn.Conv2D(256,
                   5,
                   strides=1,
                   padding=2,
                   activation="relu",
                   groups=2), LRN(), nn.MaxPool2D(3, 2, padding=1),
         nn.Conv2D(384, 3, padding=1, activation="relu"),
         nn.Conv2D(384, 3, padding=1, activation="relu", groups=2),
         nn.Conv2D(256,
                   kernel_size=3,
                   padding=1,
                   activation="relu",
                   groups=2), nn.GlobalMaxPool2D(),
         nn.Dense(fc_size, activation="relu"), nn.Dropout(0.5),
         nn.Dense(fc_size, activation="relu"), nn.Dropout(0.5),
         nn.Dense(num_classes))
     return
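A quick shape check for this variant (a sketch; LRN is assumed to be a user-defined local response normalization block, and mxnet imported as mx). Because the network ends in GlobalMaxPool2D instead of the original AlexNet's fixed-size pooling, it accepts variable input resolutions:

net = AlexNet(num_classes=1000)
net.initialize()
x = mx.nd.random.normal(shape=(1, 3, 224, 224))
print(net.layers(x).shape)  # (1, 1000)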
Example #5
def get_blk(i):
    if i == 0:
        blk = body_blk()
    elif i == 4:
        blk = nn.GlobalMaxPool2D()
    else:
        blk = down_sample_blk(128)
    return blk
Example #6
 def get_blk(self, i):
     if i == 0:
         blk = self.base_net()
     elif i == 2:
         blk = nn.GlobalMaxPool2D()
     else:
         blk = self.downSample(128)
     return blk
Example #7
def get_blk(i):
    if i == 0:
        blk = base_net()
    elif i == 4:
        blk = nn.GlobalMaxPool2D()
    else:
        blk = half_W_H(128)
    return blk
Example #8
 def block(self, i):
     if i == 0:
         blk = BaseNet()
     elif i == 4:
         blk = nn.GlobalMaxPool2D()
     else:
         blk = DownSampleBlock(128)
     return blk
Example #9
def get_block(i):
    if i == 0:
        block = base_net()
    elif i == 4:
        block = nn.GlobalMaxPool2D()
    else:
        block = down_sample_block(128)
    return block
Example #10
 def __init__(self, norm_layer, channels, reduction_ratio=16, norm_kwargs=None):
     super(Enhanced_Channel_Attenion, self).__init__()
     with self.name_scope():
         self.avg_pool = nn.GlobalAvgPool2D()
         self.max_pool = nn.GlobalMaxPool2D()
         self.down_op = nn.Conv2D(1, kernel_size=(2, 1))
         self.gate_c = nn.HybridSequential()
         self.gate_c.add(nn.Dense(channels // reduction_ratio, use_bias=False))
         self.gate_c.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
         self.gate_c.add(nn.Activation('relu'))
         self.gate_c.add(nn.Dense(channels, use_bias=False))
Example #11
 def __init__(self, classNum, verbose=False, **kwargs):
     super(CIFARNET, self).__init__(**kwargs)
     with self.name_scope():
         self.convs, self.fcs = nn.HybridSequential(), nn.HybridSequential()
         self.convs.add(
             nn.Conv2D(channels=64, kernel_size=3, strides=1, padding=1))
         self.convs.add(CIFARCONV(ch=64))
         self.convs.add(CIFARCONV(ch=128, downsample=True))
         self.convs.add(CIFARCONV(ch=256, downsample=True))
         self.fcs.add(nn.GlobalMaxPool2D())
         self.fcs.add(nn.Dense(classNum))
     return
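This example and the next presumably chain the two stages in the forward pass; a minimal sketch (for the plain nn.Sequential variant below, the method would be forward(self, x) instead):

 def hybrid_forward(self, F, x):
     # Convolutional feature extractor, then global pooling + classifier.
     return self.fcs(self.convs(x))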
Example #12
 def __init__(self, classNum, verbose=False, **kwargs):
     super(CIFARNET, self).__init__(**kwargs)
     with self.name_scope():
         self.convs, self.fcs = nn.Sequential(), nn.Sequential()
         self.convs.add(
             nn.Conv2D(channels=32, kernel_size=3, strides=1, padding=1))
         self.convs.add(CIFARCONV(ch=32))
         self.convs.add(CIFARCONV(ch=64, downsample=True))
         self.convs.add(CIFARCONV(ch=64, downsample=True))
         self.convs.add(Proto2DBlock(64, 20, imgSize, batchSize))
         self.fcs.add(nn.GlobalMaxPool2D())
         self.fcs.add(nn.Dense(classNum))
     return
Example #13
    def __init__(self, depth, ctx, pretrained=True, num_features=0, num_classes=0, num_parts=1):
        super(ResNet, self).__init__()
        self.num_classes = num_classes
        self.num_parts = num_parts

        with self.name_scope():
            # Take the pretrained backbone without its final pooling layer,
            # then force the last stage's stride to 1 so the output feature
            # map stays larger (a common re-ID trick).
            model = ResNet.__factory[depth](pretrained=pretrained, ctx=ctx).features[:-1]
            model[-1][0].body[0]._kwargs['stride'] = (1, 1)
            model[-1][0].downsample[0]._kwargs['stride'] = (1, 1)
            self.base = nn.HybridSequential()
            for m in model:
                self.base.add(m)

            #local
            self.feat = nn.HybridSequential()
            self.classify = nn.HybridSequential()
            for _ in range(num_parts):
                tmp = nn.HybridSequential()
                tmp.add(nn.GlobalMaxPool2D())
                feat = nn.Conv2D(channels=num_features, kernel_size=1, use_bias=False)
                feat.initialize(init.MSRAPrelu('in', 0), ctx=ctx)
                tmp.add(feat)
                bn = nn.BatchNorm()
                bn.initialize(init=init.Zero(), ctx=ctx)
                tmp.add(bn)
                tmp.add(nn.Flatten())
                self.feat.add(tmp)

                classifier = nn.Dense(num_classes, use_bias=False)
                classifier.initialize(init=init.Normal(0.001), ctx=ctx)
                self.classify.add(classifier)

            #global
            self.g_feat = nn.HybridSequential()
            self.g_classify = nn.HybridSequential()
            for _ in range(1):
                tmp = nn.HybridSequential()
                tmp.add(nn.GlobalAvgPool2D())
                feat = nn.Conv2D(channels=num_features, kernel_size=1, use_bias=False)
                feat.initialize(init.MSRAPrelu('in', 0), ctx=ctx)
                tmp.add(feat)
                bn = nn.BatchNorm(center=False, scale=False)
                bn.initialize(init=init.Zero(), ctx=ctx)
                tmp.add(bn)
                tmp.add(nn.Flatten())
                self.g_feat.add(tmp)

                classifier = nn.Dense(num_classes, use_bias=False)
                classifier.initialize(init=init.Normal(0.001), ctx=ctx)
                self.g_classify.add(classifier)
Example #14
 def __init__(self,
              channels,
              reduction,
              act_cfg=dict(type='ReLU'),
              spatial_dilate=0,
              **kwargs):
     super(CBAM, self).__init__(**kwargs)
     with self.name_scope():
         self.max_pool = nn.GlobalMaxPool2D()
         self.avg_pool = nn.GlobalAvgPool2D()
         self.mlp = nn.HybridSequential()
         self.mlp.add(
             ConvBundle(channels // reduction,
                        kernel=1,
                        stride=1,
                        pad=0,
                        bias=True,
                        act_cfg=act_cfg,
                        prefix='fc1_'))
         self.mlp.add(
             ConvBundle(channels,
                        kernel=1,
                        stride=1,
                        pad=0,
                        bias=True,
                        act_cfg=None,
                        prefix='fc2_'))
         if spatial_dilate > 0:
             self.spatial_conv = ConvBundle(1,
                                            kernel=3,
                                            stride=1,
                                            pad=spatial_dilate,
                                            dilation=spatial_dilate,
                                            bias=True,
                                            act_cfg=None,
                                            prefix='spatialconv_')
         else:
             self.spatial_conv = None
         self.spatial = ConvBundle(1,
                                   kernel=7,
                                   stride=1,
                                   pad=3,
                                   bias=True,
                                   act_cfg=dict(type='Sigmoid'),
                                   prefix='spatialconv_')
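One plausible composition of these pieces (a sketch; ConvBundle is assumed to be a conv + optional activation wrapper, as the prefixes suggest, and the exact ordering of the optional dilated conv is an assumption):

 def hybrid_forward(self, F, x):
     # Channel gate: shared MLP over both pooled descriptors.
     att = F.sigmoid(self.mlp(self.avg_pool(x)) + self.mlp(self.max_pool(x)))
     x = F.broadcast_mul(x, att)
     # Spatial gate: channel-wise max and mean maps, optional dilated conv,
     # then a 7x7 conv whose sigmoid output rescales every location.
     s = F.concat(F.max(x, axis=1, keepdims=True),
                  F.mean(x, axis=1, keepdims=True), dim=1)
     if self.spatial_conv is not None:
         s = self.spatial_conv(s)
     return F.broadcast_mul(x, self.spatial(s))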
Example #15
def get_blk(i):
    """ 
    0.- base_net
    1, 2, 3.- down_sample_blk
    4.- GlobalMaxPool2D

    Args:
        i ([type]): [description]

    Returns:
        [type]: [description]
    """
    if i == 0:
        blk = base_net()
    elif i == 4:
        blk = nn.GlobalMaxPool2D()
    else:
        blk = down_sample_blk(128)
    return blk
Example #16
    def __init__(self, ctx=mx.cpu(), warmup=5, runs=25, inputs=None):
        # Set the default Inputs
        default_parameters = {
            "data": (32, 3, 256, 256),
            "data_initializer": nd.normal,
            "layout": "NCHW",
            "run_backward": True,
            "dtype": "float32"
        }

        super().__init__(ctx=ctx,
                         warmup=warmup,
                         runs=runs,
                         default_parameters=default_parameters,
                         custom_parameters=inputs)

        self.data = get_mx_ndarray(ctx=self.ctx,
                                   in_tensor=self.inputs["data"],
                                   dtype=self.inputs["dtype"],
                                   initializer=self.inputs["data_initializer"],
                                   attach_grad=self.inputs["run_backward"])

        self.block = nn.GlobalMaxPool2D(layout=self.inputs["layout"])
        self.block.initialize(ctx=self.ctx)
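Outside the benchmarking harness, the block under test is straightforward; a standalone sketch of what it computes on the default input shape:

import mxnet as mx
from mxnet.gluon import nn

block = nn.GlobalMaxPool2D(layout="NCHW")
block.initialize()
x = mx.nd.random.normal(shape=(32, 3, 256, 256))
print(block(x).shape)  # (32, 3, 1, 1): one max per channel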
Example #17
net = nn.Sequential()
# add name_scope on the outermost Sequential
with net.name_scope():
    net.add(
        mlpconv(96, 11, 0, strides=4),
        mlpconv(256, 5, 2),
        mlpconv(384, 3, 1),
        nn.Dropout(.5),
        # 10 target classes
        mlpconv(10, 3, 1, max_pooling=False),
        # The input here is batch_size x 10 x 5 x 5; global pooling converts
        # it to batch_size x 10 x 1 x 1.
        # We could use nn.AvgPool2D(pool_size=5), but global pooling is more
        # convenient because it avoids having to work out pool_size.
        nn.GlobalMaxPool2D(),
        # 转成batch_size x 10
        nn.Flatten())

import sys

sys.path.append('/home/xue/gluon-tutorials-zh-master')
import utils
from mxnet import gluon
from mxnet import init

train_data, test_data = utils.load_data_fashion_mnist(batch_size=64, resize=96)

ctx = utils.try_gpu()
net.initialize(ctx=ctx, init=init.Xavier())
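The tutorial code this is adapted from typically continues with a training loop; a sketch, assuming utils.train follows the gluon-tutorials-zh signature:

loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
utils.train(train_data, test_data, net, loss, trainer, ctx, num_epochs=1)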
Example #18
 def __init__(self, **kwargs):
     super(GlobalAvgMaxPool2D, self).__init__(**kwargs)
     with self.name_scope():
         self.avg_pool = nn.GlobalAvgPool2D()
         self.max_pool = nn.GlobalMaxPool2D()
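The combined block's forward pass is not shown; one common variant concatenates the two descriptors along the channel axis (a sketch; other implementations sum or average them instead):

 def hybrid_forward(self, F, x):
     # (N, C, 1, 1) avg and max descriptors -> (N, 2C, 1, 1)
     return F.concat(self.avg_pool(x), self.max_pool(x), dim=1)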
Example #19
def output():
    net = nn.GlobalMaxPool2D()
    # Note: GlobalMaxPool2D has no trainable parameters,
    # so this lr_mult setting has no practical effect.
    net.collect_params().setattr('lr_mult', 3)
    return net