Example #1
 def __init__(self, use_bias, alg, **kwargs):
     super(ConvAct, self).__init__(**kwargs)
     self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)
     if alg == "relu6":
       self.act = RELU6()
     elif alg == "leakyrelu":
       self.act = nn.LeakyReLU(0.25)
     elif alg == "gelu":
       self.act = nn.GELU()
     elif alg == "gelu_tanh":
       self.act = nn.GELU(approximation='tanh')
     else:
       self.act = nn.Activation(activation=alg)
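Only the constructor appears in this example; the forward pass is not part of the snippet. A minimal sketch of what it could look like, assuming the block simply chains the convolution and the activation selected above (an assumption, not code from the original source):

 def forward(self, x):
     # hypothetical forward: convolution followed by the activation chosen in __init__
     return self.act(self.conv0(x))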
Example #2
 def __init__(self, act_func: str, **kwargs):
     super(Activation, self).__init__()
     with self.name_scope():
         if act_func in ('relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'):
             self.act = nn.Activation(act_func)
         elif act_func == 'leaky':
             self.act = nn.LeakyReLU(**kwargs)
         elif act_func == 'prelu':
             self.act = nn.PReLU(**kwargs)
         elif act_func == 'selu':
             self.act = nn.SELU()
         elif act_func == 'elu':
             self.act = nn.ELU(**kwargs)
         elif act_func == 'gelu':
             self.act = nn.GELU()
         elif act_func == 'relu6':
             self.act = ReLU6()
         elif act_func == 'hard_sigmoid':
             self.act = HardSigmoid()
         elif act_func == 'swish':
             self.act = nn.Swish()
         elif act_func == 'hard_swish':
             self.act = HardSwish()
         elif act_func == 'mish':
             self.act = Mish()
         else:
             raise NotImplementedError(
                 f"Not implemented activation: {act_func}")
Example #3
 def __init__(self, alg, use_bias, **kwargs):
     super(ConvBNAct, self).__init__(**kwargs)
     self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)
     self.bn = nn.BatchNorm()
     if alg == "relu6":
       self.act = RELU6()
     elif alg == "leakyrelu":
       self.act = nn.LeakyReLU(0.25)
     elif alg == "gelu":
       self.act = nn.GELU()
     else:
       self.act = nn.Activation(activation=alg)
Example #4
 def __init__(self, use_bias, alg, **kwargs):
     super(ConvActAdd, self).__init__(**kwargs)
     self.conv0 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias,
                            weight_initializer=mx.init.Xavier(magnitude=2.24))
     if alg == "relu6":
       self.act = RELU6()
     elif alg == "leakyrelu":
       self.act = nn.LeakyReLU(0.25)
     elif alg == "gelu":
       self.act = nn.GELU()
     else:
       self.act = nn.Activation(activation=alg)
     self.conv1 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1, use_bias=use_bias)
     self.conv1.share_parameters(self.conv0.collect_params())
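The matching forward pass is omitted from the snippet; given the two convolutions (with conv1 sharing conv0's parameters) and the activation, it presumably adds the activated conv0 branch to the conv1 branch. A sketch under that assumption:

 def forward(self, x):
     # hypothetical forward: act(conv0(x)) added to conv1(x); conv1 reuses conv0's parameters
     return self.act(self.conv0(x)) + self.conv1(x)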
Example #5
    def __init__(self, act_type, r, skernel, dilation, channels, useReLU, useGlobal, asBackbone,
                 stride, downsample=False, in_channels=0, norm_layer=BatchNorm,
                 norm_kwargs=None, **kwargs):
        super(ResBlockV2ATAC, self).__init__(**kwargs)
        self.bn1 = norm_layer(**({} if norm_kwargs is None else norm_kwargs))
        self.conv1 = _conv3x3(channels, stride, in_channels)
        self.bn2 = norm_layer(**({} if norm_kwargs is None else norm_kwargs))
        self.conv2 = _conv3x3(channels, 1, channels)
        if downsample:
            self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False, in_channels=in_channels)
        else:
            self.downsample = None

        if act_type == 'relu':
            self.msAA1 = nn.Activation('relu')
            self.msAA2 = nn.Activation('relu')
        elif act_type == 'prelu':
            self.msAA1 = nn.PReLU()
            self.msAA2 = nn.PReLU()
        elif act_type == 'elu':
            self.msAA1 = nn.ELU()
            self.msAA2 = nn.ELU()
        elif act_type == 'selu':
            self.msAA1 = nn.SELU()
            self.msAA2 = nn.SELU()
        elif act_type == 'gelu':
            self.msAA1 = nn.GELU()
            self.msAA2 = nn.GELU()
        elif act_type == 'swish':
            self.msAA1 = nn.Swish()
            self.msAA2 = nn.Swish()
        elif act_type == 'ChaATAC':
            self.msAA1 = ChaATAC(channels=in_channels, r=r, useReLU=useReLU, useGlobal=useGlobal)
            self.msAA2 = ChaATAC(channels=channels, r=r, useReLU=useReLU, useGlobal=useGlobal)
        else:
            raise ValueError("Unknown act_type in ResBlockV2ATAC")
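This constructor follows the pre-activation (ResNet v2) layout: BatchNorm and activation before each convolution, with an optional 1x1 convolution on the shortcut. The forward pass is not part of the snippet; a typical hybrid_forward consistent with the layers above might look like the following sketch (an assumption, not the original code):

    def hybrid_forward(self, F, x):
        # hypothetical pre-activation residual forward: BN -> act -> conv, twice, plus shortcut
        residual = x
        x = self.msAA1(self.bn1(x))
        if self.downsample:
            residual = self.downsample(x)
        x = self.conv1(x)
        x = self.msAA2(self.bn2(x))
        x = self.conv2(x)
        return x + residual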
Example #6
def test_activations_gelu():
    act_layer = nn.GELU()
    out = act_layer(mx.np.random.uniform(size=(10,)))
    out.asnumpy()  # block until the asynchronous computation completes
Example #7
 def __init__(self, units, hidden_size, **kwargs):
     super(PositionWiseFFN, self).__init__(**kwargs)
     self.ffn_1 = nn.Dense(hidden_size, flatten=False)
     self.activation = nn.GELU()
     self.ffn_2 = nn.Dense(units, flatten=False)
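Here too only the constructor is listed; the position-wise feed-forward network presumably runs Dense -> GELU -> Dense along the last axis (flatten=False keeps the sequence dimension). A minimal sketch under that assumption:

 def forward(self, x):
     # hypothetical forward: expand to hidden_size, apply GELU, project back to units
     return self.ffn_2(self.activation(self.ffn_1(x)))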
Example #8
    def __init__(self, layers, channels, classes,
                 act_type, r, skernel, dilation, useReLU, useGlobal, act_layers, replace_act,
                 act_order, asBackbone, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(ResNet20V2ATAC, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(norm_layer(scale=False, center=False,
                                         **({} if norm_kwargs is None else norm_kwargs)))
            self.features.add(nn.Conv2D(channels[0], 3, 1, 1, use_bias=False))

            in_channels = channels[0]
            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                if act_order == 'bac':
                    if i + act_layers < len(channels):
                        tmp_act_type = replace_act
                    else:
                        tmp_act_type = act_type
                elif act_order == 'pre':
                    if i + 1 > act_layers:
                        tmp_act_type = replace_act
                    else:
                        tmp_act_type = act_type
                else:
                    raise ValueError('Unknown act_order')
                self.features.add(self._make_layer(
                    layers=num_layer, channels=channels[i+1], in_channels=in_channels,
                    stride=stride, stage_index=i+1, act_type=tmp_act_type, r=r, skernel=skernel,
                    dilation=dilation, useReLU=useReLU, useGlobal=useGlobal,
                    asBackbone=asBackbone, norm_layer=norm_layer, norm_kwargs=norm_kwargs
                ))
                in_channels = channels[i+1]

            self.features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))

            if act_order == 'bac':
                if act_layers <= 0:
                    tmp_act_type = replace_act
                else:
                    tmp_act_type = act_type
            elif act_order == 'pre':
                if act_layers >= 4:
                    tmp_act_type = act_type
                else:
                    tmp_act_type = replace_act
            else:
                raise ValueError('Unknown act_order')

            if tmp_act_type == 'relu':
                self.features.add(nn.Activation('relu'))
            elif tmp_act_type == 'prelu':
                self.features.add(nn.PReLU())
            elif tmp_act_type == 'elu':
                self.features.add(nn.ELU())
            elif tmp_act_type == 'selu':
                self.features.add(nn.SELU())
            elif tmp_act_type == 'gelu':
                self.features.add(nn.GELU())
            elif tmp_act_type == 'swish':
                self.features.add(nn.Swish())
            elif tmp_act_type == 'ChaATAC':
                self.features.add(ChaATAC(channels=in_channels, r=r, useReLU=useReLU,
                                          useGlobal=useGlobal))
            else:
                raise ValueError("Unknown act_type in ResNet20V2ATAC")

            self.features.add(nn.GlobalAvgPool2D())
            self.features.add(nn.Flatten())
            self.output = nn.Dense(classes, in_units=in_channels)