Example #1
import mxnet as mx
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal


def test_lambda():
    # Reference network built from the stock activation blocks.
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'), nn.LeakyReLU(0.1))

    # Same graph via HybridLambda, which accepts either an operator name
    # or a lambda taking (F, x, ...).
    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'), nn.HybridLambda(op3))

    # Imperative variant via Lambda (NDArray-only, hence plain Sequential).
    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'), nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy())
    assert_almost_equal(out1.asnumpy(), out3.asnumpy())
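As a quick sanity check separate from the snippets on this page (my own sketch), nn.LeakyReLU(alpha) computes f(x) = x for x > 0 and alpha * x otherwise, so its output can be verified element-wise:

import mxnet as mx
from mxnet.gluon import nn

x = mx.nd.array([-2.0, -0.5, 0.0, 1.0, 3.0])
out = nn.LeakyReLU(0.1)(x)                 # parameterless block, so no initialize() needed
print(out.asnumpy())                       # [-0.2  -0.05  0.    1.    3.  ]
expected = mx.nd.where(x > 0, x, 0.1 * x)  # f(x) = max(x, alpha*x) for 0 < alpha < 1
print((out == expected).asnumpy().all())   # True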
Example #2
 def __init__(self, act_func, **kwargs):
     super(Activation, self).__init__(**kwargs)
     if act_func == "relu":
         self.act = nn.Activation('relu')
     elif act_func == "relu6":
         self.act = ReLU6()
     elif act_func == "hard_sigmoid":
         self.act = HardSigmoid()
     elif act_func == "swish":
         self.act = nn.Swish()
     elif act_func == "hard_swish":
         self.act = HardSwish()
     elif act_func == "leaky":
         self.act = nn.LeakyReLU(alpha=0.375)
     else:
         raise NotImplementedError("unsupported act_func: %s" % act_func)
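The snippet above shows only the constructor; a minimal completion sketch (my assumption, not in the source) adds the forward pass, which is pure delegation since every branch stores a callable block in self.act:

 def hybrid_forward(self, F, x):
     # Hypothetical forward for the wrapper above: delegate to the chosen block.
     return self.act(x)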
Example #3
def tiny_darknet(output_size=125, token_op=False):
    net = _nn.HybridSequential()
    for idx, f in enumerate([16, 32, 64, 128, 256, 512, 1024, 1024], 1):
        # Using NHWC would eliminate transposes before and after the base
        # model. However, this is currently not supported by Conv2D in mxnet,
        # so we have to use NCHW for now.
        layout = 'NCHW'
        c_axis = 3 if layout == 'NHWC' else 1

        net.add(_nn.Conv2D(channels=f,
                           kernel_size=(3, 3),
                           use_bias=False,
                           layout=layout,
                           prefix='conv%d_' % (idx - 1),
                           padding=(1, 1)))
        net.add(_nn.BatchNorm(axis=c_axis,
                              prefix='batchnorm%d_' % (idx - 1),
                              momentum=0.9,
                              epsilon=1e-5))
        net.add(_nn.LeakyReLU(0.1, prefix='leakyrelu%d_' % (idx - 1)))

        if idx < 6:
            strides = (2, 2)
            net.add(_nn.MaxPool2D(pool_size=(2, 2),
                                  strides=strides,
                                  layout=layout,
                                  prefix='pool%d_' % (idx - 1),
                                  ))

        elif idx == 6:
            strides = (1, 1)
            net.add(_nn.MaxPool2D(pool_size=(2, 2),
                                  strides=strides,
                                  layout=layout,
                                  padding=(1, 1),
                                  ceil_mode=False,
                                  prefix='pool%d_' % (idx - 1),
                                  ))
            net.add(_SpecialCrop(prefix='specialcrop%d_' % (idx - 1)))

    if output_size is not None:
        net.add(_nn.Conv2D(channels=output_size,
                           kernel_size=(1, 1),
                           prefix='conv8_',
                           layout=layout))

    return net
Example #4
 def __init__(self, dimension, num_channels, num_layers):
     super(CnnEmbedding, self).__init__()
     with self.name_scope():
         self.convolutions = nn.Sequential()
         for _ in range(num_layers - 1):
             self.convolutions.add(
                 nn.Conv2D(channels=num_channels,
                           kernel_size=3,
                           padding=1,
                           weight_initializer=mx.init.Xavier()))
             self.convolutions.add(nn.LeakyReLU(0.2))
         # No ReLu for last convolution to allow for negative features.
         self.convolutions.add(
             nn.Conv2D(channels=dimension,
                       kernel_size=3,
                       padding=1,
                       weight_initializer=mx.init.Xavier()))
Example #5
 def __init__(self,
              in_feats,
              out_feats,
              num_heads,
              feat_drop=0.,
              attn_drop=0.,
              negative_slope=0.2,
              residual=False,
              activation=None):
     super(GATConv, self).__init__()
     self._num_heads = num_heads
     self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
     self._in_feats = in_feats
     self._out_feats = out_feats
     with self.name_scope():
         if isinstance(in_feats, tuple):
             self.fc_src = nn.Dense(out_feats * num_heads, use_bias=False,
                                    weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                    in_units=self._in_src_feats)
             self.fc_dst = nn.Dense(out_feats * num_heads, use_bias=False,
                                    weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                    in_units=self._in_dst_feats)
         else:
             self.fc = nn.Dense(out_feats * num_heads, use_bias=False,
                                weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                in_units=in_feats)
         self.attn_l = self.params.get('attn_l',
                                       shape=(1, num_heads, out_feats),
                                       init=mx.init.Xavier(magnitude=math.sqrt(2.0)))
         self.attn_r = self.params.get('attn_r',
                                       shape=(1, num_heads, out_feats),
                                       init=mx.init.Xavier(magnitude=math.sqrt(2.0)))
         self.feat_drop = nn.Dropout(feat_drop)
         self.attn_drop = nn.Dropout(attn_drop)
         self.leaky_relu = nn.LeakyReLU(negative_slope)
         if residual:
             if in_feats != out_feats:
                 self.res_fc = nn.Dense(out_feats * num_heads, use_bias=False,
                                        weight_initializer=mx.init.Xavier(
                                            magnitude=math.sqrt(2.0)),
                                        in_units=in_feats)
             else:
                 self.res_fc = Identity()
         else:
             self.res_fc = None
         self.activation = activation
Example #6
 def __init__(self, alg, use_bias, **kwargs):
     super(ConvBNSumAct, self).__init__(**kwargs)
     self.conv0 = nn.Conv2D(channels=64,
                            kernel_size=(3, 3),
                            strides=1,
                            use_bias=use_bias)
     self.conv1 = nn.Conv2D(channels=64, kernel_size=(3, 3), strides=1)
     self.conv1.share_parameters(self.conv0.collect_params())
     self.bn = nn.BatchNorm()
     if alg == "relu6":
         self.act = RELU6()
     elif alg == "leakyrelu":
         self.act = nn.LeakyReLU(0.25)
     elif alg == "gelu":
         self.act = nn.GELU()
     else:
         self.act = nn.Activation(activation=alg)
Example #7
    def __init__(self, image_size=784, h_dim=400, z_dim=20):
        super(VAE, self).__init__()
        with self.name_scope():
            encoder = nn.HybridSequential()
            encoder.add(
                nn.Dense(h_dim),
                nn.LeakyReLU(0.2),
                nn.Dense(z_dim * 2)  # 2 for mean and variance.
            )

            decoder = nn.HybridSequential()
            decoder.add(
                nn.Dense(h_dim, activation='relu'),
                nn.Dense(image_size, activation='sigmoid')
            )
        self.encoder = encoder
        self.decoder = decoder
Example #8
def _encoder_module(in_channels, out_channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
    """LeakyReLU -> strided conv -> optional normalization, one encoder stage.

    :param in_channels: number of input channels
    :param out_channels: number of output channels
    :param norm_layer: normalization block class, or None to skip normalization
    :param norm_kwargs: extra keyword arguments forwarded to norm_layer
    :return: an nn.HybridSequential block
    """
    module = nn.HybridSequential()
    module.add(nn.LeakyReLU(0.2))
    module.add(nn.Conv2D(channels=out_channels, kernel_size=4, strides=2, padding=1, in_channels=in_channels))
    if norm_layer is not None:
        norm_kwargs = {} if norm_kwargs is None else {**norm_kwargs}
        norm_kwargs['in_channels'] = out_channels
        module.add(norm_layer(**norm_kwargs))  # was hard-coded nn.BatchNorm, ignoring norm_layer
    return module
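A usage sketch (my example, not from the source): with norm_layer now honored, a different normalization such as nn.InstanceNorm can be swapped in, and the stride-2 convolution halves each spatial dimension:

import mxnet as mx
from mxnet.gluon import nn

enc = _encoder_module(64, 128, norm_layer=nn.InstanceNorm)
enc.initialize()
x = mx.nd.random.uniform(shape=(1, 64, 32, 32))
print(enc(x).shape)  # (1, 128, 16, 16)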
Example #9
 def __init__(self, slope=0.2, nfactor=1.0, **kwargs):
     super(D_Net, self).__init__(**kwargs)
     with self.name_scope():
         self.d_net = nn.HybridSequential()
         self.d_net.add(
             nn.Conv2D(channels=intround(64 * nfactor),
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       padding=(1, 1),
                       use_bias=True),  # 256->128
             nn.LeakyReLU(alpha=slope),
             nn.Conv2D(channels=intround(128 * nfactor),
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       padding=(1, 1),
                       use_bias=True),  # 128->64
             nn.BatchNorm(),
             nn.LeakyReLU(alpha=slope),
             nn.Conv2D(channels=intround(256 * nfactor),
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       padding=(1, 1),
                       use_bias=True),  # 64->32
             nn.BatchNorm(),
             nn.LeakyReLU(alpha=slope),
             nn.Conv2D(channels=intround(512 * nfactor),
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       padding=(1, 1),
                       use_bias=True),  # 32->16
             nn.BatchNorm(),
             nn.LeakyReLU(alpha=slope),
             nn.Conv2D(channels=intround(512 * nfactor),
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       padding=(1, 1),
                       use_bias=True),  # 16->8
             nn.BatchNorm(),
             nn.LeakyReLU(alpha=slope),
             nn.Conv2D(channels=intround(512 * nfactor),
                       kernel_size=(4, 4),
                       strides=(2, 2),
                       padding=(1, 1),
                       use_bias=True),  # 8->4
             nn.BatchNorm(),
             nn.LeakyReLU(alpha=slope),
             nn.Conv2D(channels=2,
                       kernel_size=(4, 4),
                       strides=(1, 1),
                       padding=(0, 0),
                       use_bias=True),  # 4->1
             nn.Flatten())
Example #10
    def __init__(self,
                 channels,
                 odd_pointwise,
                 avg_pool_size,
                 cls_activ,
                 alpha=0.1,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(DarkNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        stage.add(dark_convYxY(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            bn_use_global_stats=bn_use_global_stats,
                            alpha=alpha,
                            pointwise=(len(channels_per_stage) > 1) and not(((j + 1) % 2 == 1) ^ odd_pointwise)))
                        in_channels = out_channels
                    if i != len(channels) - 1:
                        stage.add(nn.MaxPool2D(
                            pool_size=2,
                            strides=2))
                self.features.add(stage)

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Conv2D(
                channels=classes,
                kernel_size=1,
                in_channels=in_channels))
            if cls_activ:
                self.output.add(nn.LeakyReLU(alpha=alpha))
            self.output.add(nn.AvgPool2D(
                pool_size=avg_pool_size,
                strides=1))
            self.output.add(nn.Flatten())
Example #11
def get_D_net(slope=0.2, nfactor=1.0, **kwargs):
    d_net = nn.HybridSequential(**kwargs)
    with d_net.name_scope():
        d_net.add(
            nn.Conv2D(channels=intround(64 * nfactor),
                      kernel_size=(4, 4),
                      strides=(2, 2),
                      padding=(1, 1),
                      use_bias=True),  # 256->128
            nn.LeakyReLU(alpha=slope),
            nn.Conv2D(channels=intround(128 * nfactor),
                      kernel_size=(4, 4),
                      strides=(2, 2),
                      padding=(1, 1),
                      use_bias=True),  # 128->64
            nn.BatchNorm(),
            nn.LeakyReLU(alpha=slope),
            nn.Conv2D(channels=intround(256 * nfactor),
                      kernel_size=(4, 4),
                      strides=(2, 2),
                      padding=(1, 1),
                      use_bias=True),  # 64->32
            nn.BatchNorm(),
            nn.LeakyReLU(alpha=slope),
            nn.Conv2D(channels=intround(512 * nfactor),
                      kernel_size=(4, 4),
                      strides=(2, 2),
                      padding=(1, 1),
                      use_bias=True),  # 32->16
            nn.BatchNorm(),
            nn.LeakyReLU(alpha=slope),
            nn.Conv2D(channels=intround(512 * nfactor),
                      kernel_size=(4, 4),
                      strides=(2, 2),
                      padding=(1, 1),
                      use_bias=True),  # 16->8
            nn.BatchNorm(),
            nn.LeakyReLU(alpha=slope),
            nn.Conv2D(channels=intround(512 * nfactor),
                      kernel_size=(4, 4),
                      strides=(2, 2),
                      padding=(1, 1),
                      use_bias=True),  # 8->4
            nn.BatchNorm(),
            nn.LeakyReLU(alpha=slope),
            nn.Conv2D(channels=2,
                      kernel_size=(4, 4),
                      strides=(1, 1),
                      padding=(0, 0),
                      use_bias=True),  # 4->1
        )
    return d_net
Example #12
def ConvBNBlock(channels,
                kernel_size,
                strides,
                pad,
                use_bias=False,
                leaky=True):
    blk = nn.HybridSequential()
    blk.add(
        nn.Conv2D(int(channels),
                  kernel_size=kernel_size,
                  strides=strides,
                  padding=pad,
                  use_bias=use_bias))
    if not use_bias:
        blk.add(nn.BatchNorm(in_channels=int(channels)))
    if leaky:
        blk.add(nn.LeakyReLU(0.1))
    return blk
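A shape check for ConvBNBlock (my sketch): a 3x3 convolution with stride 2 and padding 1 halves each spatial dimension:

import mxnet as mx

blk = ConvBNBlock(32, kernel_size=3, strides=2, pad=1)
blk.initialize()
x = mx.nd.random.uniform(shape=(1, 3, 64, 64))
print(blk(x).shape)  # (1, 32, 32, 32)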
Example #13
def _conv2d(channel, kernel, padding, stride, num_sync_bn_devices=-1):
    """A common conv-bn-leakyrelu cell"""
    cell = nn.HybridSequential(prefix='')
    cell.add(
        nn.Conv2D(channel,
                  kernel_size=kernel,
                  strides=stride,
                  padding=padding,
                  use_bias=False))
    if num_sync_bn_devices < 1:
        cell.add(nn.BatchNorm(epsilon=1e-5, momentum=0.9))
    else:
        cell.add(
            gluon.contrib.nn.SyncBatchNorm(epsilon=1e-5,
                                           momentum=0.9,
                                           num_devices=num_sync_bn_devices))
    cell.add(nn.LeakyReLU(0.1))
    return cell
Example #14
def _conv21d(out_channels,
             kernel,
             padding,
             strides,
             norm_layer=BatchNorm,
             norm_kwargs=None):
    """R(2+1)D from 'A Closer Look at Spatiotemporal Convolutions for Action Recognition'"""
    cell = nn.HybridSequential(prefix='R(2+1)D')
    if isinstance(strides, int):
        strides = (strides, strides, strides)
    cell.add(
        nn.Conv3D(out_channels,
                  kernel_size=(1, kernel, kernel),
                  strides=(1, strides[1], strides[2]),
                  padding=(0, padding, padding),
                  use_bias=False,
                  groups=1))
    cell.add(
        norm_layer(epsilon=1e-5,
                   momentum=0.9,
                   **({} if norm_kwargs is None else norm_kwargs)))
    cell.add(nn.LeakyReLU(0.1))

    if kernel == 3:  # special replicate pad: zero padding here would corrupt the middle 2D pathway
        cell.add(
            Conv3DRepPad(out_channels,
                         kernel_size=(kernel, 1, 1),
                         strides=(strides[0], 1, 1),
                         padding=(1, 0, 0),
                         use_bias=False,
                         groups=out_channels))
    else:
        cell.add(
            nn.Conv3D(out_channels,
                      kernel_size=(kernel, 1, 1),
                      strides=(strides[0], 1, 1),
                      padding=(padding, 0, 0),
                      use_bias=False,
                      groups=out_channels))

    # cell.add(nn.LeakyReLU(0.1))  # this breaks the imgnet pretrain flow

    return cell
Example #15
 def __init__(self,
              kernel,
              stride,
              dilation,
              layers,
              feature,
              channel,
              padding,
              swap_in=True,
              swap_out=True,
              arch='',
              auto=False,
              norm=False,
              device=None,
              last=True,
              flatten=False,
              reconstruct=False):
     super(Wave2DT, self).__init__()
     self.arch = arch
     self.swap_out = swap_out
     self.swap_in = swap_in
     self.reconstruct = reconstruct
     self.layers = layers
     self.kernel = kernel
     self.stride = stride
     self.dilation = dilation
     self.sequence = []
     self.norm = norm
     self.channel = channel
     with self.name_scope():
         self.activation = nn.Activation('relu')
         self.tanh = nn.Activation('tanh')
         self.sigmoid = nn.Activation('sigmoid')
         self.relu = nn.Activation('relu')
         self.lrelu = nn.LeakyReLU(0.1)
         if swap_out:
             self.fc = nn.Dense(1, 'sigmoid', flatten=False)
         self.dropout = nn.Dropout(0.5)
         if self.norm:
             self.norms = []
         self.add(layers, channel, kernel, stride, padding, dilation)
     if self.norm:
         assert (len(self.norms) == len(self.sequence))
Example #16
def discriminator_block(in_channels,
                        out_channels,
                        kernel_size,
                        stride,
                        padding=0,
                        last_layer=False):
    net = nn.Sequential()
    net.add(
        nn.Conv2D(in_channels=in_channels,
                  channels=out_channels,
                  kernel_size=kernel_size,
                  strides=stride,
                  padding=padding))
    if last_layer:
        net.add(nn.Activation(activation='sigmoid'))
    else:
        net.add(nn.BatchNorm())
        net.add(nn.LeakyReLU(alpha=0.2))
    return net
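Stacking these blocks yields a DCGAN-style discriminator; a usage sketch (my example, the layer sizes are assumptions):

import mxnet as mx
from mxnet.gluon import nn

disc = nn.Sequential()
disc.add(discriminator_block(3, 64, 4, 2, padding=1))          # 64x64 -> 32x32
disc.add(discriminator_block(64, 128, 4, 2, padding=1))        # 32x32 -> 16x16
disc.add(discriminator_block(128, 1, 16, 1, last_layer=True))  # 16x16 -> 1x1 score
disc.initialize()
print(disc(mx.nd.random.uniform(shape=(2, 3, 64, 64))).shape)  # (2, 1, 1, 1)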
Example #17
    def __init__(self, ctx=mx.cpu(), warmup=10, runs=50, inputs=None):
        # Set the default Inputs.
        # Default data is (32, 3, 256, 256): a batch of 32 sample images of size 3*256*256.
        default_parameters = {"data": (32, 3, 256, 256),
                              "data_initializer": nd.normal,
                              "alpha": 0.01,
                              "run_backward": True,
                              "dtype": "float32"}

        super().__init__(ctx=ctx, warmup=warmup, runs=runs, default_parameters=default_parameters,
                         custom_parameters=inputs)

        self.data = get_mx_ndarray(ctx=self.ctx, in_tensor=self.inputs["data"],
                                   dtype=self.inputs["dtype"],
                                   initializer=self.inputs["data_initializer"],
                                   attach_grad=self.inputs["run_backward"])

        self.block = nn.LeakyReLU(alpha=self.inputs["alpha"])

        self.block.initialize(ctx=self.ctx)
Example #18
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              padding,
              bn_use_global_stats,
              **kwargs):
     super(DarkConv, self).__init__(**kwargs)
     with self.name_scope():
         self.conv = nn.Conv2D(
             channels=out_channels,
             kernel_size=kernel_size,
             padding=padding,
             use_bias=False,
             in_channels=in_channels)
         self.bn = nn.BatchNorm(
             in_channels=out_channels,
             use_global_stats=bn_use_global_stats)
         # self.bn = nn.BatchNorm(in_channels=out_channels, momentum=0.01)
         self.activ = nn.LeakyReLU(alpha=0.1)
Example #19
    def __init__(self, num_classes=None):
        super(ResNetBuilder, self).__init__()
        self.base = ResNetV1()
        self.avgpool = nn.GlobalAvgPool2D()
        self.flatten = nn.Flatten()

        self.num_classes = num_classes
        self.bottleneck = nn.HybridSequential()
        self.bottleneck.add(
            nn.Dense(512,
                     in_units=2048,
                     weight_initializer=mx.initializer.MSRAPrelu('out', 0)))
        self.bottleneck.add(nn.BatchNorm(in_channels=512))
        self.bottleneck.add(nn.LeakyReLU(0.1))
        self.bottleneck.add(nn.Dropout(0.5))

        self.classifier = nn.Dense(
            self.num_classes,
            in_units=512,
            weight_initializer=mx.initializer.Normal(0.001))
Example #20
def _conv2d(channel,
            kernel,
            padding,
            stride,
            norm_layer=BatchNorm,
            norm_kwargs=None):
    """A common conv-bn-leakyrelu cell"""
    cell = nn.HybridSequential(prefix='')
    cell.add(
        nn.Conv2D(channel,
                  kernel_size=kernel,
                  strides=stride,
                  padding=padding,
                  use_bias=False))
    cell.add(
        norm_layer(epsilon=1e-5,
                   momentum=0.9,
                   **({} if norm_kwargs is None else norm_kwargs)))
    cell.add(nn.LeakyReLU(0.1))
    return cell
Example #21
 def __init__(self, use_bias, alg, **kwargs):
     super(ConvActAdd, self).__init__(**kwargs)
     self.conv0 = nn.Conv2D(
         channels=64,
         kernel_size=(3, 3),
         strides=1,
         use_bias=use_bias,
         weight_initializer=mx.init.Xavier(magnitude=2.24))
     if alg == "relu6":
         self.act = RELU6()
     elif alg == "leakyrelu":
         self.act = nn.LeakyReLU(0.25)
     elif alg == "gelu":
         self.act = nn.GELU()
     else:
         self.act = nn.Activation(activation=alg)
     self.conv1 = nn.Conv2D(channels=64,
                            kernel_size=(3, 3),
                            strides=1,
                            use_bias=use_bias)
     self.conv1.share_parameters(self.conv0.collect_params())
Example #22
def Conv2dBatchLeaky(in_channels,
                     out_channels,
                     kernel,
                     stride,
                     padding,
                     norm_layer=BatchNorm,
                     norm_kwargs=None):
    """A common conv-bn-leakyrelu cell"""
    cell = nn.HybridSequential(prefix='')
    cell.add(
        nn.Conv2D(channels=out_channels,
                  kernel_size=kernel,
                  strides=stride,
                  padding=padding,
                  in_channels=in_channels,
                  use_bias=False))
    cell.add(
        norm_layer(axis=1, epsilon=1e-5, momentum=0.9,
                   **({} if norm_kwargs is None else norm_kwargs)))  # honor norm_kwargs
    cell.add(nn.LeakyReLU(0.1))

    return cell
Example #23
def get_activation(act):
    """Get the activation based on the act string

    Parameters
    ----------
    act: str or HybridBlock

    Returns
    -------
    ret: HybridBlock
    """
    if isinstance(act, str):
        if act == 'leaky':
            return nn.LeakyReLU(0.1)
        elif act == 'identity':
            return IdentityActivation()
        elif act == 'elu':
            return ELU()
        else:
            return nn.Activation(act)
    else:
        return act
Example #24
def get_activation(act):
    """Get the activation based on the act string

    Parameters
    ----------
    act: str or HybridBlock

    Returns
    -------
    ret: HybridBlock
    """
    if act is None:
        return lambda x: x
    if isinstance(act, str):
        if act == "leaky":
            return nn.LeakyReLU(0.1)
        elif act in ["relu", "sigmoid", "tanh", "softrelu", "softsign"]:
            return nn.Activation(act)
        else:
            raise NotImplementedError
    else:
        return act
Example #25
 def make_crit_block(self,
                     input_dim,
                     output_dim,
                     kernel_size=4,
                     strides=2,
                     final_layer=False):
     layer = nn.HybridSequential()
     if not final_layer:
         layer.add(
             nn.Conv2D(in_channels=input_dim,
                       channels=output_dim,
                       kernel_size=kernel_size,
                       strides=strides,
                       use_bias=False),
             nn.BatchNorm(in_channels=output_dim), nn.LeakyReLU(alpha=0.2))
     else:
         layer.add(
             nn.Conv2D(in_channels=input_dim,
                       channels=output_dim,
                       kernel_size=kernel_size,
                       strides=strides))
     return layer
Example #26
def convolution(num_filters,
                filter_size,
                norm_layer,
                norm_kwargs,
                stride=1,
                pad=1,
                use_mish=False):
    out = nn.HybridSequential()
    out.add(nn.Conv2D(num_filters, filter_size, padding=pad, strides=stride))
    if norm_layer == nn.BatchNorm:
        out.add(nn.BatchNorm(in_channels=num_filters))
    elif norm_layer == contrib_nn.SyncBatchNorm:
        out.add(
            mx.gluon.contrib.nn.SyncBatchNorm(
                in_channels=num_filters,
                num_devices=norm_kwargs['num_devices']))
    else:
        raise ValueError("Unknown norm layer type: {}".format(norm_layer))
    if use_mish:
        out.add(Mish())
    else:
        out.add(nn.LeakyReLU(alpha=0.25))
    return out
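Mish is assumed to be defined elsewhere in that codebase; a minimal sketch under the standard definition mish(x) = x * tanh(softplus(x)):

from mxnet.gluon import nn

class Mish(nn.HybridBlock):
    # Hypothetical Mish block, assuming the standard formulation;
    # 'softrelu' is MXNet's softplus, log(1 + exp(x)).
    def hybrid_forward(self, F, x):
        return x * F.tanh(F.Activation(x, act_type='softrelu'))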
Example #27
    def __init__(self,
                 block,
                 layers,
                 channels,
                 classes=400,
                 return_features=False,
                 **kwargs):
        super(R21DV1, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        self.return_features = return_features
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(
                _conv21d(channels[0], [3, 7, 7],
                         strides=[1, 2, 2],
                         padding=[1, 3, 3],
                         mid_channels=45,
                         prefix='init_'))
            self.features.add(
                nn.BatchNorm(epsilon=1e-3,
                             momentum=0.9,
                             use_global_stats=True,
                             prefix='init_'))
            self.features.add(nn.LeakyReLU(0.0))

            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                self.features.add(
                    self._make_layer(block,
                                     num_layer,
                                     channels[i + 1],
                                     stride,
                                     i + 1,
                                     in_channels=channels[i]))
            self.avg = nn.GlobalAvgPool3D()

            self.dense = nn.Dense(classes, in_units=channels[-1])
Example #28
def _conv21d(out_channels,
             kernel,
             strides=(1, 1, 1),
             padding=(0, 0, 0),
             in_channels=0,
             mid_channels=None,
             norm_layer=nn.BatchNorm,
             norm_kwargs=None,
             prefix=''):
    """R(2+1)D from 'A Closer Look at Spatiotemporal Convolutions for Action Recognition'"""
    cell = nn.HybridSequential(prefix='R(2+1)D')
    if mid_channels is None:
        mid_channels = int(
            math.floor((kernel[0] * kernel[1] * kernel[2] * in_channels *
                        out_channels) / (kernel[1] * kernel[2] * in_channels +
                                         kernel[0] * out_channels)))

    cell.add(
        _conv3d(mid_channels, (1, kernel[1], kernel[2]),
                strides=(1, strides[1], strides[2]),
                padding=(0, padding[1], padding[2]),
                prefix=prefix + 'middle_'))

    cell.add(
        norm_layer(epsilon=1e-3,
                   momentum=0.9,
                   prefix=prefix + 'middle_',
                   **({} if norm_kwargs is None else norm_kwargs)))
    cell.add(nn.LeakyReLU(0.0))

    cell.add(
        _conv3d(out_channels, (kernel[0], 1, 1),
                strides=(strides[0], 1, 1),
                padding=(padding[0], 0, 0),
                prefix=prefix))

    return cell
Example #29
def get_activation(act: Optional[Union[str, HybridBlock]]) -> HybridBlock:
    """Get the activation based on the string

    Parameters
    ----------
    act
        The activation

    Returns
    -------
    ret
        The activation layer

    """
    if act is None:
        return lambda x: x
    if isinstance(act, str):
        if act == 'leaky':
            # TODO(sxjscience) Add regex matching here to parse `leaky(0.1)`
            return nn.LeakyReLU(0.1)
        elif act == 'identity':
            return IdentityActivation()
        elif act == 'elu':
            return ELU()
        elif act == 'gelu':
            return GELU(mode='erf')
        elif act == 'gelu(tanh)':
            return GELU(mode='tanh')
        elif act == 'gelu(sigmoid)':
            return GELU(mode='sigmoid')
        elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:
            return nn.Activation(act)
        else:
            raise NotImplementedError('act={} is not supported'.format(act))
    else:
        return act
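A usage sketch for the dispatcher (my example):

import mxnet as mx

act = get_activation('leaky')        # returns nn.LeakyReLU(0.1)
x = mx.nd.array([[-1.0, 2.0]])
print(act(x).asnumpy())              # [[-0.1  2. ]]
print(get_activation(None)(x) is x)  # identity passthrough: True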
Example #30
    def __init__(self, features, config, **kwargs):
        super().__init__(**kwargs)
        self.feature_layers = [4, 5, 6, 7]
        self.feature_dims = [64, 128, 256, 512]
        self.features = features
        config = Reader(config)
        with self.name_scope():
            # self.backbone = vision.resnet50_v1()
            self.reduce_dim = nn.HybridSequential()
            self.reduce_dim.add(nn.Conv2D(64, 1))
            self.reduce_dim.add(nn.BatchNorm())
            self.reduce_dim.add(nn.Activation('relu'))

            channels = config.network.flow.channels.get([64, 32, 16, 2])
            self.flow = nn.HybridSequential(prefix='flow')
            for i, c in enumerate(channels):
                if i != 0:
                    self.flow.add(nn.LeakyReLU(0.1))
                self.flow.add(
                    nn.Conv2D(c,
                              7,
                              padding=3,
                              weight_initializer=Xavier(rnd_type='gaussian',
                                                        magnitude=2)))