Example #1
 def __init__(self, inputSize=168, outputSize=24, dilations=[1, 2, 4, 8, 16, 20, 32], nResidue=35, actType='relu', dropout=0.2, **kwargs):
     super(TCN, self).__init__(**kwargs)
     self.inputSize = inputSize
     self.outputSize = outputSize
     self.dilations = dilations
     self.encoder = nn.Sequential()
     self.outputLayer = nn.Sequential()
     with self.name_scope():
         # The embedding of auxiliary variables
         self.stationEmbedding = nn.Embedding(963,18)
         self.nYearEmbedding = nn.Embedding(3,2)
         self.nMonthEmbedding = nn.Embedding(12,2)
         self.mDayEmbedding = nn.Embedding(31,5)
         self.wdayEmbedding = nn.Embedding(7,3)
         self.nHourEmbedding = nn.Embedding(24,4)
         for d in self.dilations:
             self.encoder.add(ResidualTCN(d=d, n_residue=nResidue))
         self.decoder = (futureResidual(xDim=64))
         self.outputLayer.add(nn.Dense(64, flatten=False))
         self.outputLayer.add(nn.BatchNorm(axis=2))
         self.outputLayer.add(nn.Swish())
         #self.outputLayer.add(nn.Activation(activation=actType))
         self.outputLayer.add(nn.Dropout(dropout))
         #self.outputLayer.add(nn.Dense(1,activation='relu',flatten=False))
         self.mu = nn.Dense(1, flatten=False, activation='relu')
         self.sigma = nn.Dense(1, flatten=False, activation='softrelu')
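A small, self-contained illustration (hypothetical inputs, not from the source) of how one of the embeddings above maps integer hour-of-day indices to 4-dimensional vectors:

import mxnet as mx
from mxnet.gluon import nn

hour_embed = nn.Embedding(24, 4)     # same shape as self.nHourEmbedding above
hour_embed.initialize()
hours = mx.nd.array([0, 6, 12, 23])  # integer hour-of-day indices
print(hour_embed(hours).shape)       # (4, 4): one 4-d vector per hour index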
Example #2
def get_activation_layer(activation):
    """
    Create activation layer from string/function.

    Parameters
    ----------
    activation : function, or str, or HybridBlock
        Activation function or name of activation function.

    Returns
    -------
    HybridBlock
        Activation layer.
    """
    assert (activation is not None)
    if isfunction(activation):
        return activation()
    elif isinstance(activation, str):
        if activation == "relu6":
            return ReLU6()
        elif activation == "swish":
            return nn.Swish()
        elif activation == "hswish":
            return HSwish()
        elif activation == "hsigmoid":
            return HSigmoid()
        else:
            return nn.Activation(activation)
    else:
        assert (isinstance(activation, HybridBlock))
        return activation
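A minimal usage sketch for the helper above (hypothetical call site; it assumes only the MXNet Gluon API plus the get_activation_layer defined in this example): resolve an activation by name and apply it to a dummy batch.

# Hypothetical usage; "swish" resolves to nn.Swish() in the helper above.
import mxnet as mx

act = get_activation_layer("swish")
x = mx.nd.random.uniform(-2, 2, shape=(4, 8))
y = act(x)          # swish(x) = x * sigmoid(x), same shape as the input
print(y.shape)      # (4, 8)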
Example #3
 def __init__(self, act_func: str, **kwargs):
     super(Activation, self).__init__()
     with self.name_scope():
         if act_func in ('relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'):
             self.act = nn.Activation(act_func)
         elif act_func == 'leaky':
             self.act = nn.LeakyReLU(**kwargs)
         elif act_func == 'prelu':
             self.act = nn.PReLU(**kwargs)
         elif act_func == 'selu':
             self.act = nn.SELU()
         elif act_func == 'elu':
             self.act = nn.ELU(**kwargs)
         elif act_func == 'gelu':
             self.act = nn.GELU()
         elif act_func == 'relu6':
             self.act = ReLU6()
         elif act_func == 'hard_sigmoid':
             self.act = HardSigmoid()
         elif act_func == 'swish':
             self.act = nn.Swish()
         elif act_func == 'hard_swish':
             self.act = HardSwish()
         elif act_func == 'mish':
             self.act = Mish()
         else:
             raise NotImplementedError(
                 f"Not implemented activation: {act_func}")
Example #4
def _add_conv(out, channels=1, kernel=1, stride=1, pad=0,
              num_group=1, active=True):
    out.add(nn.Conv2D(channels, kernel, stride, pad, groups=num_group, use_bias=False))
    print(out[-1])  # log the conv layer just added instead of constructing a second, unused Conv2D
    out.add(nn.BatchNorm(scale=True, momentum=0.99, epsilon=1e-3))
    if active:
        out.add(nn.Swish())
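A short usage sketch for _add_conv (the Sequential container and input shape are assumptions, not from the source): stack two conv + BatchNorm + Swish groups and run a dummy batch through them.

# Sketch only: two conv-BN-Swish groups built with the helper above.
import mxnet as mx
from mxnet.gluon import nn

body = nn.HybridSequential()
_add_conv(body, channels=16, kernel=3, stride=1, pad=1)
_add_conv(body, channels=32, kernel=3, stride=2, pad=1)
body.initialize()

x = mx.nd.random.uniform(shape=(2, 3, 32, 32))
print(body(x).shape)   # (2, 32, 16, 16) after the stride-2 group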
Example #5
def get_activation(activation: str, **kwargs) -> nn.HybridBlock:
    """

    Parameters
    ----------
    activation
        Activation type

    Returns
    -------
    mxnet.gluon.HybridBlock
        Activation object

    """
    if activation in ["relu", "sigmoid", "softrelu", "softsign", "tanh"]:
        return nn.Activation(activation=activation, **kwargs)
    if activation == "lrelu":
        return nn.LeakyReLU(alpha=0.2, **kwargs)
    if activation == "elu":
        return nn.ELU(**kwargs)
    if activation == "swish":
        return nn.Swish(**kwargs)
    if activation == "lipswish":
        return LipSwish(**kwargs)
    raise NotImplementedError(activation)
Example #6
    def __init__(self, num_clothes, num_colors, ctx):
        super(fashion_net_2_branches, self).__init__()
        self._features = model_zoo.get_model('mobilenetv2_1.0',
                                             pretrained=True,
                                             ctx=ctx).features
        for _, w in self._features.collect_params().items():
            w.grad_req = 'null'

        self._flatten = nn.Flatten()
        self._relu = nn.Activation(activation='relu')
        self._swish = nn.Swish()

        self._clothes_fc_1 = nn.Dense(100)
        self._clothes_bn = nn.BatchNorm(center=False, scale=True)
        self._clothes_out = nn.Dense(num_clothes)

        self._clothes_fc_1.initialize(init=init.Xavier(), ctx=ctx)
        self._clothes_bn.initialize(init=init.Zero(), ctx=ctx)
        self._clothes_out.initialize(init=init.Xavier(), ctx=ctx)

        self._color_fc_1 = nn.Dense(100)
        self._color_bn_1 = nn.BatchNorm(center=False, scale=True)
        self._color_fc_2 = nn.Dense(50)
        self._color_bn_2 = nn.BatchNorm(center=False, scale=True)
        self._color_out = nn.Dense(num_colors)

        self._color_fc_1.initialize(init=init.Xavier(), ctx=ctx)
        self._color_bn_1.initialize(init=init.Zero(), ctx=ctx)
        self._color_fc_2.initialize(init=init.Xavier(), ctx=ctx)
        self._color_bn_2.initialize(init=init.Zero(), ctx=ctx)
        self._color_out.initialize(init=init.Xavier(), ctx=ctx)
Example #7
 def __init__(self, channel, se_ratio=0.25):
     super(SEModule, self).__init__()
     # self.avg_pool = nn.contrib.AdaptiveAvgPooling2D()
     self.fc = nn.HybridSequential()
     self.fc.add(nn.Dense(int(channel*se_ratio), use_bias=False, in_units=channel),
                 nn.Swish(),
                 nn.Dense(int(channel), use_bias=False, in_units=int(channel*se_ratio)),
                 nn.Activation("sigmoid")) # in mobilenet-v3, this is Hsigmoid
Example #8
 def __init__(self, in_channel, channel, kernel_size, stride, expand=1.0, se_ratio=0.25, res_add=True):
     super(BottleNeck, self).__init__()
     self.add = res_add
     self.out = nn.HybridSequential()
     if expand == 1.0:
         self.out.add(
             conv_bn(in_channel, in_channel, kernel_size, stride, groups=in_channel, activation=nn.Swish()),
             SEModule(in_channel, se_ratio),
             conv_1x1_bn(in_channel, channel, activation=nn.Swish())
         )
     else:
         self.out.add(
             conv_1x1_bn(in_channel, channel*expand, activation=nn.Swish()),
             conv_bn(channel*expand, channel*expand, kernel_size, stride, groups=channel*expand, activation=nn.Swish()),
             SEModule(channel*expand, se_ratio),
             conv_1x1_bn(channel*expand, channel, activation=nn.Swish())
         )
Example #9
    def __init__(self,
                 width_coeff=1.0,
                 depth_coeff=1.0,
                 dropout_rate=0.0,
                 scale=1,
                 se_ratio=0.25,
                 num_classes=256):
        super(EfficientNet, self).__init__()
        channels = [32, 16, 24, 40, 80, 112, 192, 320, 1280]
        expands = [1, 6, 6, 6, 6, 6, 6]
        repeats = [1, 2, 2, 3, 3, 4, 1]
        strides = [1, 2, 2, 2, 1, 2, 1]
        kernel_sizes = [3, 3, 5, 3, 5, 5, 3]

        channels = [round(x * width_coeff) for x in channels]
        repeats = [round(x * depth_coeff) for x in repeats]

        self.out = nn.HybridSequential()
        if scale != 1:
            # Here, should do interpolation to resize input_image to resolution in "bi" mode
            # self.out.add(utils.UpSampling(scale))
            pass
        self.out.add(
            nn.Conv2D(channels[0],
                      3,
                      2,
                      padding=1,
                      use_bias=False,
                      in_channels=3))
        self.out.add(nn.BatchNorm(scale=True))
        for i in range(7):
            self.out.add(
                utils.MBBlock(channels[i], channels[i + 1], repeats[i],
                              kernel_sizes[i], strides[i], expands[i],
                              se_ratio))
        self.out.add(
            utils.conv_1x1_bn(channels[7], channels[8], nn.Swish()),
            # utils.AdaptiveAvgPool2D(1),
            nn.GlobalAvgPool2D(),
            nn.Flatten(),
            nn.Dropout(dropout_rate),
            nn.Dense(num_classes, use_bias=False, in_units=channels[8]),
            nn.BatchNorm(scale=True),
            nn.Swish())
Example #10
 def __init__(self, num_filter, kernel, pad, activation, **kwargs):
     super(Conv_Block, self).__init__(**kwargs)
     with self.name_scope():
         self.conv = nn.Conv2D(num_filter, kernel, padding=pad)
         self.norm = nn.BatchNorm()
         if activation == "swish":
             self.act = nn.Swish()
         else:
             self.act = nn.Activation(activation)
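As in several snippets here, only __init__ is shown; a minimal hybrid_forward consistent with the layers registered above (a sketch, not the original code) is simply conv -> norm -> activation:

 def hybrid_forward(self, F, x):
     # Apply convolution, batch normalization, then the configured activation.
     return self.act(self.norm(self.conv(x)))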
Example #11
 def __init__(self, act_type, channels=1, **kwargs):
     super(Activation, self).__init__(**kwargs)
     with self.name_scope():
         if act_type == 'prelu':
             self.activation = PReLU(channels=channels, prefix='prelu_')
         elif act_type == 'relu':
             self.activation = nn.Activation(act_type, prefix='relu_')
         elif act_type == 'swish':
             self.activation = nn.Swish(beta=1.0, prefix='swish_')
         else:
             sys.exit('Wrong activation type: %s' % act_type)
Example #12
    def __init__(self,
                 n_input,
                 n_encoder_state,
                 n_wave_output,
                 decoder_arch,
                 n_layers=1,
                 dropout=0.5,
                 device=None):

        super(MPEWD, self).__init__()

        self.wd = []

        self.n_path = decoder_arch.n_layers  # temporarily using this as a stand-in

        print('npath : ', self.n_path)

        self.wa = [None] * self.n_path

        for p in range(self.n_path):
            self.wa[p] = copy.deepcopy(decoder_arch)
            self.wa[p].size = [decoder_arch.size[p]]
            self.wa[p].n_layers = n_layers

        with self.name_scope():
            self.e = Encoder(n_input, n_encoder_state, 1, dropout)

            for p in range(self.n_path):
                self.wd.append(
                    WaveDecoder(self.wa[p],
                                n_layers,
                                device=device,
                                last=False))
                self.register_child(self.wd[p])

            self.norm = nn.BatchNorm(axis=2)
            # self.norm = nn.LayerNorm(axis=2)

            self.relu = nn.Swish()

            # self.fc = nn.Dense(1, activation="sigmoid", flatten=False)
            self.fc = nn.Dense(1, flatten=False)
Example #13
    def __init__(self, act_type, r, skernel, dilation, channels, useReLU, useGlobal, asBackbone,
                 stride, downsample=False, in_channels=0, norm_layer=BatchNorm,
                 norm_kwargs=None, **kwargs):
        super(ResBlockV2ATAC, self).__init__(**kwargs)
        self.bn1 = norm_layer(**({} if norm_kwargs is None else norm_kwargs))
        self.conv1 = _conv3x3(channels, stride, in_channels)
        self.bn2 = norm_layer(**({} if norm_kwargs is None else norm_kwargs))
        self.conv2 = _conv3x3(channels, 1, channels)
        if downsample:
            self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False, in_channels=in_channels)
        else:
            self.downsample = None

        if act_type == 'relu':
            self.msAA1 = nn.Activation('relu')
            self.msAA2 = nn.Activation('relu')
        elif act_type == 'prelu':
            self.msAA1 = nn.PReLU()
            self.msAA2 = nn.PReLU()
        elif act_type == 'elu':
            self.msAA1 = nn.ELU()
            self.msAA2 = nn.ELU()
        elif act_type == 'selu':
            self.msAA1 = nn.SELU()
            self.msAA2 = nn.SELU()
        elif act_type == 'gelu':
            self.msAA1 = nn.GELU()
            self.msAA2 = nn.GELU()
        elif act_type == 'swish':
            self.msAA1 = nn.Swish()
            self.msAA2 = nn.Swish()
        elif act_type == 'ChaATAC':
            self.msAA1 = ChaATAC(channels=in_channels, r=r, useReLU=useReLU, useGlobal=useGlobal)
            self.msAA2 = ChaATAC(channels=channels, r=r, useReLU=useReLU, useGlobal=useGlobal)
        else:
            raise ValueError("Unknown act_type in ResBlockV2ATAC")
Example #14
 def __init__(self, act_func, **kwargs):
     super(Activation, self).__init__(**kwargs)
     if act_func == "relu":
         self.act = nn.Activation('relu')
     elif act_func == "relu6":
         self.act = ReLU6()
     elif act_func == "hard_sigmoid":
         self.act = HardSigmoid()
     elif act_func == "swish":
         self.act = nn.Swish()
     elif act_func == "hard_swish":
         self.act = HardSwish()
     elif act_func == "leaky":
         self.act = nn.LeakyReLU(alpha=0.375)
     else:
         raise NotImplementedError
Example #15
def get_activation_layer(activation):
    assert (activation is not None)
    if isfunction(activation):
        return activation()
    elif isinstance(activation, str):
        if activation == "relu6":
            return ReLU6()
        elif activation == "swish":
            return nn.Swish()
        elif activation == "hswish":
            return HSwish()
        elif activation == "hsigmoid":
            return HSigmoid()
        else:
            return nn.Activation(activation)
    else:
        assert (isinstance(activation, HybridBlock))
        return activation
Example #16
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        # Set the default Inputs.
        # Default data is (32, 3, 256, 256) to mimic an input of batch_size=32 and a sample image of size 3*256*256.
        default_parameters = {"data": (32, 3, 256, 256),
                              "data_initializer": nd.normal,
                              "run_backward": True,
                              "dtype": "float32"}

        super().__init__(ctx=ctx, warmup=warmup, runs=runs, default_parameters=default_parameters,
                         custom_parameters=inputs)

        self.data = get_mx_ndarray(ctx=self.ctx, in_tensor=self.inputs["data"],
                                   dtype=self.inputs["dtype"],
                                   initializer=self.inputs["data_initializer"],
                                   attach_grad=self.inputs["run_backward"])

        self.block = nn.Swish()

        self.block.initialize(ctx=self.ctx)
Example #17
def _add_conv(out,
              channels=1,
              kernel=1,
              stride=1,
              pad=0,
              num_group=1,
              active=True,
              batchnorm=True):
    out.add(SamePadding(kernel, stride, dilation=(1, 1)))
    out.add(
        nn.Conv2D(channels,
                  kernel,
                  stride,
                  pad,
                  groups=num_group,
                  use_bias=False))
    if batchnorm:
        out.add(nn.BatchNorm(scale=True, momentum=0.99, epsilon=1e-3))
    if active:
        out.add(nn.Swish())
Example #18
def test_activations_swish():
    act_layer = nn.Swish()
    out = act_layer(mx.np.random.uniform(size=(10,)))
    out.asnumpy()
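A slightly stronger variant of the smoke test above, checking values against the Swish definition x * sigmoid(x) using the classic NDArray API (assumes MXNet 1.x and the layer default beta=1.0):

import mxnet as mx
import numpy as np
from mxnet.gluon import nn

x = mx.nd.random.uniform(-3, 3, shape=(10,))
out = nn.Swish()(x)                                     # beta defaults to 1.0
expected = x.asnumpy() / (1.0 + np.exp(-x.asnumpy()))   # x * sigmoid(x)
assert np.allclose(out.asnumpy(), expected, atol=1e-5)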
Example #19
    test_x = np.arange(-20, 20, 0.1)

    n_model = 20
    n_batch = 5000
    batch_size = 64

    activations = {
        'ReLU': mx.nd.relu,
        'Sigmoid': mx.nd.sigmoid,
        'Tanh': mx.nd.tanh,
        'Relu6': lambda x: mx.nd.clip(mx.nd.relu(x), 0, 6),
        'LeakyRelu': mx.nd.LeakyReLU,
        'ELU': nn.ELU(),
        'SELU': nn.SELU(),
        'PReLU': nn.PReLU(),
        'Swish': nn.Swish(),
    }

    legends = []
    for act in activations:
        test_err = np.zeros_like(test_x)

        for i in range(n_model):
            print("Train:  %s %d/%d" % (act, i + 1, n_model))
            net = Net(act=activations[act])
            net.collect_params().initialize(mx.init.Xavier(), ctx=ctx)

            train(net, train_x[0], train_x[1], batch_size, n_batch)
            err = evaluate(net, test_x)

            test_err += err
Example #20
    def __init__(self, layers, channels, classes,
                 act_type, r, skernel, dilation, useReLU, useGlobal, act_layers, replace_act,
                 act_order, asBackbone, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(ResNet20V2ATAC, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(norm_layer(scale=False, center=False,
                                         **({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Conv2D(channels[0], 3, 1, 1, use_bias=False))

            in_channels = channels[0]
            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                if act_order == 'bac':
                    if i + act_layers < len(channels):
                        tmp_act_type = replace_act
                    else:
                        tmp_act_type = act_type
                elif act_order == 'pre':
                    if i + 1 > act_layers:
                        tmp_act_type = replace_act
                    else:
                        tmp_act_type = act_type
                else:
                    raise ValueError('Unknown act_order')
                self.features.add(self._make_layer(
                    layers=num_layer, channels=channels[i+1], in_channels=in_channels,
                    stride=stride, stage_index=i+1, act_type=tmp_act_type, r=r, skernel=skernel,
                    dilation=dilation, useReLU=useReLU, useGlobal=useGlobal,
                    asBackbone=asBackbone, norm_layer=norm_layer, norm_kwargs=norm_kwargs
                ))
                in_channels = channels[i+1]

            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))

            if act_order == 'bac':
                if act_layers <= 0:
                    tmp_act_type = replace_act
                else:
                    tmp_act_type = act_type
            elif act_order == 'pre':
                if act_layers >= 4:
                    tmp_act_type = act_type
                else:
                    tmp_act_type = replace_act
            else:
                raise ValueError('Unknown act_order')

            if tmp_act_type == 'relu':
                self.features.add(nn.Activation('relu'))
            elif tmp_act_type == 'prelu':
                self.features.add(nn.PReLU())
            elif tmp_act_type == 'elu':
                self.features.add(nn.ELU())
            elif tmp_act_type == 'selu':
                self.features.add(nn.SELU())
            elif tmp_act_type == 'gelu':
                self.features.add(nn.GELU())
            elif tmp_act_type == 'swish':
                self.features.add(nn.Swish())
            elif tmp_act_type == 'ChaATAC':
                self.features.add(ChaATAC(channels=in_channels, r=r, useReLU=useReLU,
                                          useGlobal=useGlobal))
            else:
                raise ValueError("Unknown act_type in ResBlockV2ATAC")

            self.features.add(nn.GlobalAvgPool2D())
            self.features.add(nn.Flatten())
            self.output = nn.Dense(classes, in_units=in_channels)