Example #1
 def __init__(self, num_experts):
     super(DefaultRouter, self).__init__()
     with self.name_scope():
         self.body = HybridSequential(prefix='')
         self.body.add(GlobalAvgPool2D())
         # self.body.add(Dense(num_experts//4, activation='relu'))
         self.body.add(Dense(num_experts, activation='sigmoid'))
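
A hypothetical usage sketch (not part of the source); it assumes DefaultRouter also defines a hybrid_forward that simply returns self.body(x):

import mxnet as mx

router = DefaultRouter(num_experts=8)
router.initialize()
# a (2, 64, 16, 16) feature map is pooled to (2, 64, 1, 1) and projected to
# (2, 8) gate values in (0, 1), one per expert
gates = router(mx.nd.random.uniform(shape=(2, 64, 16, 16)))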
Example #2
File: P2PGAN.py  Project: wshaow/GAN
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False):
        super(Discriminator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 4
            padding = int(np.ceil((kernel_size - 1)/2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = min(2 ** n, 8)
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n_layers, 8)
            self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=1, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult))
            if use_sigmoid:
                self.model.add(Activation(activation='sigmoid'))
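
A hypothetical usage sketch (not part of the source file); it assumes the Discriminator above also defines a hybrid_forward returning self.model(x):

import mxnet as mx

net = Discriminator(in_channels=3)
net.initialize()
# PatchGAN-style output: a one-channel map of per-patch real/fake scores,
# e.g. roughly (1, 1, 35, 35) for a 256x256 input with the default n_layers=3
out = net(mx.nd.random.uniform(shape=(1, 3, 256, 256)))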
Example #3
 def __init__(self, opts):
     super(BasicBlock, self).__init__()
     self.bblock = HybridSequential()
     if opts.bottle_neck:
         self.bblock.add(
             BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         if not opts.trans_block:
             self.bblock.add(LeakyReLU(alpha=.2))
         else:
             self.bblock.add(Activation(opts.activation))
         self.bblock.add(
             Conv2D(channels=int(opts.growth_rate * 4),
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    use_bias=opts.use_bias,
                    padding=(0, 0)))
         if opts.drop_out > 0:
             self.bblock.add(Dropout(opts.drop_out))
     self.bblock.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
     self.bblock.add(Activation(activation=opts.activation))
     self.bblock.add(
         Conv2D(channels=int(opts.growth_rate),
                kernel_size=(3, 3),
                strides=(1, 1),
                use_bias=opts.use_bias,
                padding=(1, 1)))
     if opts.drop_out > 0:
         self.bblock.add(Dropout(opts.drop_out))
Example #4
    def __init__(self, channels, bn_mom, act_type, unit_name):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(ResidualBlock, self).__init__()
        self.act_type = act_type
        self.unit_name = unit_name

        self.body = HybridSequential()

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv0" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn0" % self.unit_name))
        self.body.add(
            Activation(self.act_type,
                       prefix="%s_%s0" % (self.unit_name, self.act_type)))

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv1" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn1" % self.unit_name))
Example #5
    def __init__(self, opts):
        super(BasicBlock, self).__init__()
        self.bblock = HybridSequential()
        if opts.bottle_neck:
            if opts.norm_type == 'batch':
                self.bblock.add(NormLayer())
            elif opts.norm_type == 'group':
                self.bblock.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.bblock.add(InstanceNorm())
            if opts.activation in ['leaky']:
                self.bblock.add(LeakyReLU(alpha=opts.alpha))
            else:
                self.bblock.add(Activation(opts.activation))
            self.bblock.add(Conv3D(channels=int(opts.growth_rate * 4), kernel_size=(opts.zKernelSize, 1, 1),
                              strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 0, 0)))
            if opts.drop_out > 0:
                self.bblock.add(Dropout(opts.drop_out))
        if opts.norm_type == 'batch':
            self.bblock.add(NormLayer())
        elif opts.norm_type == 'group':
            self.bblock.add(GroupNorm(in_channels=int(opts.growth_rate * 4)))
        elif opts.norm_type == 'instance':
            self.bblock.add(InstanceNorm())

        if opts.activation in ['leaky']:
            self.bblock.add(LeakyReLU(opts.alpha))
        else:
            self.bblock.add(Activation(opts.activation))
        self.bblock.add(Conv3D(channels=int(opts.growth_rate), kernel_size=(opts.zKernelSize, 3, 3),
                          strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 1, 1)))
        if opts.drop_out > 0:
            self.bblock.add(Dropout(opts.drop_out))
Example #6
class EncoderDecoderUnit(HybridBlock):
    """Return a recursive pair of encoder - decoder"""
    def __init__(self,
                 opts,
                 num_filters,
                 stage,
                 inner_block=None,
                 innermost=False):
        super(EncoderDecoderUnit, self).__init__()

        factor = 2 if stage == 0 else 1
        encoder = EncoderBlock(opts,
                               opts.units[stage],
                               num_filters,
                               trans_block=False if stage == 0 else True)
        decoder = DecoderBlock(opts,
                               num_filters,
                               res_block=(not innermost),
                               factor=factor)
        if innermost:
            model = [encoder, decoder]
        else:
            model = [encoder, inner_block, decoder]

        self.net = HybridSequential()
        for block in model:
            self.net.add(block)

    def hybrid_forward(self, F, x, *args, **kwargs):
        """Forward"""
        out = F.concat(x, self.net(x))
        return out
Example #7
    def __init__(self, block, layers, channels, **kwargs):
        super(ResNetV2, self).__init__(**kwargs)
        assert len(layers) == len(channels) - 1
        with self.name_scope():

            # To use pretrained weights, there is no choice but to use self.features = nn.HybridSequential(prefix='<anything>').
            self.features = HybridSequential(prefix='')
            self.features.add(BatchNorm(
                scale=False, center=False))  # One question: why a batch norm at the very front???
            self.features.add(Conv2D(channels[0], 7, 2, 3, use_bias=False))
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))
            self.features.add(MaxPool2D(3, 2, 1))  # 4th

            in_channels = channels[0]
            # 5(c2),6(c3),7(c4),8
            for i, num_layer in enumerate(layers):
                stride = 1 if i == 0 else 2
                self.features.add(
                    self._make_layer(block,
                                     num_layer,
                                     channels[i + 1],
                                     stride,
                                     i + 1,
                                     in_channels=in_channels))
                in_channels = channels[i + 1]
            self.features.add(BatchNorm())
            self.features.add(Activation('relu'))  # 10(c5)
Example #8
    def __init__(self, channels, bn_mom, act_type, unit_name, use_se=True, res_scale_fac=0.2):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(_RiseResidualBlock, self).__init__(unit_name)
        self.act_type = act_type
        self.unit_name = unit_name
        self.res_scale_fac = res_scale_fac

        self.use_se = use_se

        # branch 0
        self.body = HybridSequential()
        self.body.add(Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False,
                             prefix='%s_conv0' % unit_name))
        self.body.add(BatchNorm(momentum=bn_mom, prefix='%s_bn0' % self.unit_name))
        self.body.add(get_act(act_type, prefix='%s_%s0' % (unit_name, act_type)))

        self.body.add(Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False,
                             prefix='%s_conv1' % unit_name))
        self.body.add(BatchNorm(momentum=bn_mom, prefix='%s_bn1' % self.unit_name))

        self.act0 = get_act(act_type, prefix='%s_%s1' % (unit_name, act_type))

        if use_se:
            self.se0 = _SqueezeExcitation('%s_se0' % unit_name, channels, 16, act_type)
Example #9
 def __init__(self,
              channels,
              in_channels,
              num_dev=1,
              pre_relu=True,
              down=True,
              **kwargs):
     super(XceptionModule, self).__init__(**kwargs)
     with self.name_scope():
         self.body = HybridSequential(prefix='body_')
         if pre_relu:
             self.body.add(nn.Activation('relu'))
         self.body.add(_make_separable_conv3(channels, in_channels))
         self.body.add(SyncBatchNorm(num_devices=num_dev))
         self.body.add(nn.Activation('relu'))
         self.body.add(_make_separable_conv3(channels, channels))
         self.body.add(SyncBatchNorm(num_devices=num_dev))
         if down:
             self.body.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
             self.downsample = HybridSequential(prefix='downsample_')
             with self.downsample.name_scope():
                 self.downsample.add(
                     nn.Conv2D(channels,
                               kernel_size=1,
                               strides=2,
                               use_bias=False))
                 self.downsample.add(SyncBatchNorm(num_devices=num_dev))
         else:
             self.body.add(nn.Activation('relu'))
             self.body.add(_make_separable_conv3(channels, channels))
             self.body.add(SyncBatchNorm(num_devices=num_dev))
             self.downsample = None
Example #10
    def __init__(self, name, nb_act_maps, ratio=16, act_type="relu"):

        super(_GatherExcitePlus, self).__init__(prefix=name)

        self.nb_act_maps = nb_act_maps
        self.body = HybridSequential(prefix="")

        nb_units_hidden = nb_act_maps // ratio
        with self.name_scope():
            # depthwise convolution
            # gather step
            self.body.add(Conv2D(nb_act_maps, kernel_size=3, padding=1, groups=nb_act_maps, strides=2, use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())
            self.body.add(Conv2D(nb_act_maps, kernel_size=3, padding=1, groups=nb_act_maps, strides=2, use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())
            self.body.add(Conv2D(nb_act_maps, kernel_size=3, padding=1, groups=nb_act_maps, strides=2, use_bias=False))
            self.body.add(get_act(act_type))
            self.body.add(BatchNorm())

            # get excitement parameters
            self.body.add(Dense(nb_units_hidden))
            self.body.add(get_act(act_type))
            self.body.add(Dense(nb_act_maps))
            self.body.add(get_act("sigmoid"))
Example #11
    def __init__(self, num_dev, num_classes=1000, **kwargs):
        super(Xception, self).__init__(**kwargs)

        with self.name_scope():
            self.features = HybridSequential(prefix='')

            # entry flow
            for i in range(2):
                self.features.add(
                    nn.Conv2D(channels=32 * (i + 1),
                              kernel_size=3,
                              padding=0,
                              strides=2 if i == 0 else 1,
                              use_bias=False))
                self.features.add(SyncBatchNorm(num_devices=num_dev))
                self.features.add(nn.Activation('relu'))

            channels = [64, 128, 256, 728]
            for i in range(len(channels) - 1):
                self.features.add(
                    XceptionModule(channels=channels[i + 1],
                                   in_channels=channels[i],
                                   num_dev=num_dev,
                                   pre_relu=(i != 0),
                                   down=True,
                                   prefix='block{}_'.format(i + 2)))

            # middle flow
            for i in range(8):
                self.features.add(
                    XceptionModule(channels=728,
                                   in_channels=728,
                                   num_dev=num_dev,
                                   pre_relu=True,
                                   down=False,
                                   prefix='block{}_'.format(i + 5)))

            # exit flow
            self.features.add(
                XceptionExitModule(out_channels=1024,
                                   mid_channels=728,
                                   in_channels=728,
                                   num_dev=num_dev,
                                   pre_relu=True,
                                   down=True,
                                   prefix='block13_'))
            self.features.add(
                XceptionExitModule(out_channels=2048,
                                   mid_channels=1536,
                                   in_channels=1024,
                                   num_dev=num_dev,
                                   pre_relu=False,
                                   down=False,
                                   prefix='block14_'))
            self.features.add(nn.Activation('relu'))

            self.output = HybridSequential(prefix='')
            self.output.add(nn.GlobalAvgPool2D())
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(num_classes))
Example #12
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, latent=256, usetanh=False):
        super(Encoder, self).__init__()
        self.model = HybridSequential()
        kernel_size = 5
        padding = 0 #int(np.ceil((kernel_size - 1) / 2))
        self.model.add(Conv2D(channels=32, kernel_size=5, strides=2,
                              padding=2, in_channels=in_channels))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=32))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                              padding=2, in_channels=64))
        self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                              padding=2, in_channels=128))
        self.model.add(LeakyReLU(alpha=0.2))

        self.model.add(gluon.nn.Dense(latent))
        self.model.add(LeakyReLU(alpha=0.2))
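
A hypothetical usage sketch (not part of the source file), assuming the Encoder also defines a hybrid_forward returning self.model(x):

import mxnet as mx

enc = Encoder(in_channels=3, latent=256)
enc.initialize()
# six stride-2 convolutions reduce a 64x64 image to 1x1 before the Dense layer,
# so the result is a (1, 256) latent vector
code = enc(mx.nd.random.uniform(shape=(1, 3, 64, 64)))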
Example #13
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False, istest=False, isthreeway=False):
        super(Discriminator, self).__init__()
        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 5
            padding = 0 #int(np.ceil((kernel_size - 1) / 2))
            self.model.add(Conv2D(channels=32, kernel_size=5, strides=2,
                                  padding=2, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=32))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=64, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                                  padding=2, in_channels=64))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=128, kernel_size=5, strides=2,
                                  padding=2, in_channels=128))
            self.model.add(LeakyReLU(alpha=0.2))

            self.model.add(gluon.nn.Dense(1))

            if isthreeway:
                self.model.add(gluon.nn.Dense(3))
            # elif use_sigmoid:
            self.model.add(Activation(activation='sigmoid'))
Example #14
class AttentionalAggregator(RelationalAggregator):
    def __init__(self, in_units, activation=LeakyReLU(0.2), **kwargs):
        super().__init__(**kwargs)

        with self.name_scope():
            self.coefficient_net = HybridSequential()

            dense = Dense(in_units=in_units,
                          units=1,
                          use_bias=False,
                          flatten=False)
            self.coefficient_net.add(dense)
            self.coefficient_net.add(activation)

        self.softmax = MaskedSoftmax(axis=1, keepdims=True)

    def hybrid_forward(self, F, X, Z, M):
        """ X is the concatenation of source, edge, and target
            features of an edge. Z is the fused representation.
            M is a mask over the neighborhood.
        """
        coefficient = self.coefficient_net(X)
        attention_weight = self.softmax(coefficient, M)

        return F.sum(attention_weight * Z, axis=1)

    def get_args(self, X, Z, M, *args):
        return X, Z, M
Example #15
    def __init__(self, net=None, version=None, anchors=None, target_size=None,
                 ctx=mx.cpu()):
        super(AnchorOffstNet, self).__init__()
        self._net = net

        features = []
        strides = []
        darknet_output = get_darknet(version, pretrained=False, ctx=mx.cpu(), dummy=True)(
            mx.nd.random_uniform(low=0, high=1, shape=(1, 3, target_size[0], target_size[1]), ctx=mx.cpu()))
        for out in darknet_output:  # feature_14, feature_24, feature_28
            out_height, out_width = out.shape[2:]
            features.append([out_width, out_height])
            strides.append([target_size[1] // out_width, target_size[0] // out_height])

        features = features[::-1]
        strides = strides[::-1]  # in deep -> middle -> shallow order !!!
        anchors = OrderedDict(anchors)
        anchors = list(anchors.values())[::-1]
        self._numoffst = len(anchors)

        with self.name_scope():
            self._anchor_generators = HybridSequential()
            for i, anchor, feature, stride in zip(range(len(features)), anchors, features, strides):
                self._anchor_generators.add(YoloAnchorGenerator(i, anchor, feature, stride))

        self._anchor_generators.initialize(ctx=ctx)
Example #16
    def __init__(self,
                 inner_channels,
                 outer_channels,
                 inner_block=None,
                 innermost=False,
                 outermost=False,
                 use_dropout=False,
                 use_bias=False,
                 final_out=3):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=final_out,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)
Example #17
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)    
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu1 = LeakyReLU(alpha=0.2)
            
            conv2 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            conv3 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm3 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu3 = LeakyReLU(alpha=0.2)


            res_block = [conv1, norm1, relu1, conv2, norm2, relu2, conv3, norm3, relu3]

            self.se = nn.HybridSequential(prefix='')
            self.se.add(nn.Dense(outer_channels // 16, use_bias=False))
            self.se.add(nn.Activation('relu'))
            self.se.add(nn.Dense(outer_channels, use_bias=False))
            self.se.add(nn.Activation('sigmoid'))
            
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)
Example #18
class Res_Block(HybridBlock):
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)    
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu1 = LeakyReLU(alpha=0.2)
            
            conv2 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)
            conv3 = Conv2D(channels=outer_channels, kernel_size=3, strides=1, padding=1,
                           in_channels=outer_channels, use_bias=use_bias)
            norm3 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu3 = LeakyReLU(alpha=0.2)


            res_block = [conv1, norm1, relu1, conv2, norm2, relu2, conv3, norm3, relu3]
          
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)

    def hybrid_forward(self, F, x):
        residual = x
        x = self.res(x)
        x = x + residual
        return x
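
A minimal usage sketch (not from the source); the channel count and input size below are assumptions:

import mxnet as mx

block = Res_Block(outer_channels=64)
block.initialize()
block.hybridize()
# the three conv/norm/activation stages preserve the spatial size, so the
# residual output has the same shape as the input
y = block(mx.nd.random.uniform(shape=(1, 64, 32, 32)))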
Example #19
    def __init__(self, name, channels=1, fc0=256, bn_mom=0.9, act_type="relu"):
        """
        Definition of the value head proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation in branch 0
        :param fc0: Number of units in Dense/Fully-Connected layer
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        """

        super(_ValueHeadAlphaZero, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            self.body.add(
                Conv2D(channels=channels, kernel_size=(1, 1), use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(Flatten())
            self.body.add(Dense(units=fc0))
            self.body.add(get_act(act_type))
            self.body.add(Dense(units=1))
            self.body.add(get_act("tanh"))
Example #20
    def __init__(self, opts):
        super(DenseMultipathNet, self).__init__()
        opts.units = opts.units[:opts.num_stage]
        assert (len(opts.units) == opts.num_stage)

        num_filters = opts.init_channels
        num_filters_list = []
        for stage in range(opts.num_stage):
            num_filters += opts.units[stage] * opts.growth_rate
            num_filters = int(floor(num_filters * opts.reduction))
            num_filters_list.append(num_filters)

        self.net = HybridSequential()
        with self.net.name_scope():
            self.blocks = EncoderDecoderUnit(opts, num_filters_list[opts.num_stage-1], opts.num_stage-1, innermost=True)
            for stage in range(opts.num_stage-2, -1, -1):
                self.blocks = EncoderDecoderUnit(opts, num_filters_list[stage], stage, inner_block=self.blocks)
            self.net.add(FirstBlock(opts))
            self.net.add(self.blocks)
            self.net.add(ResDBlock(opts, num_filters=16))
            if opts.norm_type == 'batch':
                self.net.add(NormLayer())
            elif opts.norm_type == 'group':
                self.net.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.net.add(InstanceNorm())

            if opts.activation in ['leaky']:
                self.net.add(LeakyReLU(opts.alpha))
            else:
                self.net.add(Activation(opts.activation))
            self.net.add(Conv3D(kernel_size=(1, 1, 1), channels=2, use_bias=opts.use_bias))
            self.net.add(Softmax())
Example #21
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, latent=256, usetanh=False):
        super(Encoder, self).__init__()
        usetanh = True
        self.model = HybridSequential()
        kernel_size = 5
        padding = 0  # int(np.ceil((kernel_size - 1) / 2))
        self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                              padding=padding, in_channels=in_channels))
        self.model.add(LeakyReLU(alpha=0.2))

        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = 2 ** n
            self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult, use_global_stats=istest))
            self.model.add(LeakyReLU(alpha=0.2))

        nf_mult_prev = nf_mult
        nf_mult = 2 ** n_layers
        self.model.add(Conv2D(channels=latent, kernel_size=kernel_size, strides=2,
                              padding=padding, in_channels=ndf * nf_mult_prev,
                              use_bias=use_bias))
        self.model.add(BatchNorm(momentum=0.1, in_channels=latent, use_global_stats=istest))
        if usetanh:
            self.model.add(Activation(activation='tanh'))
        else:
            self.model.add(LeakyReLU(alpha=0.2))
Example #22
    def __init__(self,
                 opts,
                 num_filters,
                 stage,
                 inner_block=None,
                 innermost=False):
        super(EncoderDecoderUnit, self).__init__()

        factor = 2 if stage == 0 else 1
        encoder = EncoderBlock(opts,
                               opts.units[stage],
                               num_filters,
                               trans_block=False if stage == 0 else True)
        decoder = DecoderBlock(opts,
                               num_filters,
                               res_block=(not innermost),
                               factor=factor)
        if innermost:
            model = [encoder, decoder]
        else:
            model = [encoder, inner_block, decoder]

        self.net = HybridSequential()
        for block in model:
            self.net.add(block)
Example #23
    def __init__(self, in_channels, ndf=64, n_layers=3, use_bias=False, istest=False, latent=256, usetanh=False):
        super(Decoder, self).__init__()
        self.model = HybridSequential()
        kernel_size = 5
        padding = 0
        nf_mult = 2 ** n_layers
        self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                       padding=padding, in_channels=latent,
                                       use_bias=use_bias))
        self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
        # self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Activation(activation='relu'))
        for n in range(1, n_layers):
            nf_mult = nf_mult // 2
            self.model.add(Conv2DTranspose(channels=ndf * nf_mult // 2, kernel_size=kernel_size, strides=2,
                                           padding=padding, in_channels=ndf * nf_mult,
                                           use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult // 2, use_global_stats=istest))
            # self.model.add(LeakyReLU(alpha=0.2))
            if n == 2:
                self.model.add(Dropout(rate=0.5))
            self.model.add(Activation(activation='relu'))
        self.model.add(Conv2DTranspose(channels=in_channels, kernel_size=kernel_size, strides=2,
                                       padding=padding, in_channels=ndf))

        # self.model.add(LeakyReLU(alpha=0.2))
        self.model.add(Activation(activation='tanh'))
Example #24
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            relu1 = LeakyReLU(alpha=0.2)
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            conv2 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            res_block = [conv1, norm1, relu1, conv2, norm2, relu2]
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)
Example #25
    def __init__(self,
                 name,
                 channels,
                 bn_mom=0.9,
                 act_type="relu",
                 se_type=None):
        """
        Definition of the stem proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        """

        super(_StemRise, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            # add all layers to the stem
            self.body.add(
                Conv2D(channels=channels,
                       kernel_size=(3, 3),
                       padding=(1, 1),
                       use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
Example #26
 def __init__(self, channels, strides, in_channels=0):
     super(Bottleneck, self).__init__()
     self.body = HybridSequential(prefix="")
     self.body.add(
         nn.Conv2D(channels=channels // 4, kernel_size=1, strides=1))
     self.body.add(nn.BatchNorm())
     self.body.add(nn.Activation('relu'))
     self.body.add(
         nn.Conv2D(channels=channels // 4,
                   kernel_size=3,
                   strides=strides,
                   padding=1,
                   use_bias=False,
                   in_channels=channels // 4))
     self.body.add(nn.BatchNorm())
     self.body.add(nn.Activation('relu'))
     self.body.add(nn.Conv2D(channels, kernel_size=1, strides=1))
     self.body.add(nn.BatchNorm())
     self.downsample = nn.HybridSequential()
     self.downsample.add(
         nn.Conv2D(channels=channels,
                   kernel_size=1,
                   strides=strides,
                   use_bias=False,
                   in_channels=in_channels))
     self.downsample.add(nn.BatchNorm())
Example #27
    def __init__(self, name, channels=1, fc0=256, bn_mom=0.9, act_type="relu"):
        """
        Definition of the value head. Same as the AlphaZero authors' version, but with the order of BatchNorm and ReLU swapped.

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation in branch 0
        :param fc0: Number of units in Dense/Fully-Connected layer
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param se_type: Squeeze-excitation type; choose one of [None, "cSE", "sSE", "csSE"] for no squeeze excitation,
        channel-wise, spatial, or channel-spatial squeeze excitation, respectively
        """

        super(_ValueHeadRise, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            self.body.add(
                Conv2D(channels=channels, kernel_size=(1, 1), use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(Flatten())
            self.body.add(Dense(units=fc0))
            self.body.add(get_act(act_type))
            self.body.add(Dense(units=1))
            self.body.add(get_act("tanh"))
Example #28
    def __init__(self,
                 base=18,
                 heads=OrderedDict(),
                 head_conv_channel=64,
                 pretrained=True,
                 root=os.path.join(os.getcwd(), 'models'),
                 use_dcnv2=False,
                 ctx=mx.cpu()):
        super(CenterNet, self).__init__()

        with self.name_scope():
            self._base_network = get_upconv_resnet(base=base,
                                                   pretrained=pretrained,
                                                   root=root,
                                                   use_dcnv2=use_dcnv2,
                                                   ctx=ctx)
            self._heads = HybridSequential('heads')
            for name, values in heads.items():
                head = HybridSequential(name)
                num_output = values['num_output']
                bias = values.get('bias', 0.0)
                head.add(
                    Conv2D(head_conv_channel,
                           kernel_size=(3, 3),
                           padding=(1, 1),
                           use_bias=True))
                head.add(Activation('relu'))
                head.add(
                    Conv2D(num_output,
                           kernel_size=(1, 1),
                           use_bias=True,
                           bias_initializer=mx.init.Constant(bias)))
                self._heads.add(head)
        self._heads.initialize(ctx=ctx)
Example #29
class VGGAtrousBase(HybridBlock):
    def __init__(self, layers, filters):
        super(VGGAtrousBase, self).__init__()
        with self.name_scope():
            '''
            # Because pre-trained weights imported from Caffe are used, an init_scale like the one below is said to be needed
            -> Caffe's pre-trained models expect inputs scaled to 0 ~ 255
            '''
            init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape(
                (1, 3, 1, 1)) * 255
            self.init_scale = self.params.get_constant('init_scale',
                                                       init_scale)

            # layers: [2, 2, 3, 3, 3], filters: [64, 128, 256, 512, 512]
            self.stages = HybridSequential()
            for layer, filter in zip(layers, filters):
                stage = HybridSequential(prefix='')
                with stage.name_scope():
                    for _ in range(layer):
                        stage.add(
                            Conv2D(filter,
                                   kernel_size=3,
                                   padding=1,
                                   weight_initializer=mx.init.Xavier(
                                       rnd_type='gaussian',
                                       factor_type='out',
                                       magnitude=3),
                                   bias_initializer='zeros'))
                        stage.add(Activation('relu'))
                self.stages.add(stage)

            # fc6, fc7 converted to dilated convolution layers - pooling is done in hybrid_forward
            stage = HybridSequential(prefix='dilated_')
            with stage.name_scope():
                # conv6(fc6) - dilated
                stage.add(
                    Conv2D(1024,
                           kernel_size=3,
                           padding=6,
                           dilation=6,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

                # conv7(fc7)
                stage.add(
                    Conv2D(1024,
                           kernel_size=1,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

            self.stages.add(stage)
            self.norm4 = Normalize(n_channel=filters[3], initial=20, eps=1e-5)
Example #30
    def __init__(self, n_labels=2272, channels=256, num_res_blocks=19, value_fc_size=256,  bn_mom=0.9, act_type='relu', **kwargs):
        """
        Creates the alpha zero gluon net description based on the given parameters.

        :param n_labels: Number of labels for the policy
        :param channels: Used for all convolution operations. (Except the last 2)
        :param num_res_blocks: Number of residual blocks to stack. In the paper they used 19 or 39 residual blocks
        :param value_fc_size: Fully Connected layer size. Used for the value output
        :param bn_mom: Batch normalization momentum
        :return: gluon net description
        """

        super(AlphaZeroResnet, self).__init__(**kwargs, prefix='')

        self.body = HybridSequential(prefix='')

        with self.name_scope():
            self.body.add(_StemAlphaZero(name='stem', channels=channels, bn_mom=bn_mom, act_type=act_type))

        for i in range(num_res_blocks):
            unit_name = 'unit%d' % (i + 1)
            self.body.add(ResidualBlock(channels, bn_mom, act_type, unit_name=unit_name))

        # create the two heads which will be used in the hybrid fwd pass
        self.value_head = _ValueHeadAlphaZero('value', 1, value_fc_size, bn_mom, act_type)
        self.policy_head = _PolicyHeadAlphaZero('policy', 2, n_labels, bn_mom, act_type)
Example #31
class UnetSkipUnit(HybridBlock):
    def __init__(self, inner_channels, outer_channels, inner_block=None, innermost=False, outermost=False,
                 use_dropout=False, use_bias=False):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels, kernel_size=4, strides=2, padding=1,
                             in_channels=outer_channels, use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels, use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels * 2, use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)

    def hybrid_forward(self, F, x):
        if self.outermost:
            return self.model(x)
        else:
            return F.concat(self.model(x), x, dim=1)
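
A minimal sketch (not taken from the source project) of how such recursive skip units are typically nested into a full U-Net generator; the depth, ngf=64, the 3-channel 256x256 input, and the initializer are all assumptions:

import mxnet as mx

ngf = 64
# build from the innermost unit outwards; each wrap adds one more downsampling level
unet = UnetSkipUnit(ngf * 8, ngf * 8, innermost=True)
unet = UnetSkipUnit(ngf * 8, ngf * 8, inner_block=unet, use_dropout=True)
unet = UnetSkipUnit(ngf * 8, ngf * 4, inner_block=unet)
unet = UnetSkipUnit(ngf * 4, ngf * 2, inner_block=unet)
unet = UnetSkipUnit(ngf * 2, ngf, inner_block=unet)
# the outermost unit takes the 3-channel image in and maps back to 3 channels through a tanh
unet = UnetSkipUnit(ngf, 3, inner_block=unet, outermost=True)

unet.initialize(mx.init.Normal(0.02))
unet.hybridize()
fake = unet(mx.nd.random.uniform(shape=(1, 3, 256, 256)))  # -> (1, 3, 256, 256)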