Example #1
class _PolicyHeadAlphaZero(HybridBlock):
    def __init__(self, name, channels=2, n_labels=4992, bn_mom=0.9, act_type='relu'):
        """
        Definition of the policy head proposed by the alpha zero authors

        :param name: name prefix for all blocks
        :param channels: Number of channels for the 1st conv operation in branch 0
        :param n_labels: Number of labels of the policy output
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        """

        super(_PolicyHeadAlphaZero, self).__init__(prefix=name+'_')

        self.body = HybridSequential(prefix='')

        with self.name_scope():
            self.body.add(Conv2D(channels=channels, kernel_size=(1, 1), use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(Flatten())
            self.body.add(Dense(units=n_labels))

    def hybrid_forward(self, F, x):
        """
        Compute forward pass

        :param F: Handle
        :param x: Input data to the block
        :return: Activation maps of the block
        """
        out = self.body(x)

        return out
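A minimal usage sketch (assuming mxnet and this repo's get_act helper are importable; the 256-channel 8x8 input is a hypothetical board encoding):

import mxnet as mx

head = _PolicyHeadAlphaZero('policy', channels=2, n_labels=4992)
head.initialize()
planes = mx.nd.random.uniform(shape=(1, 256, 8, 8))  # e.g. 256 feature maps on an 8x8 board
print(head(planes).shape)  # (1, 4992): one logit per policy label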
Example #2
class EncoderDecoderUnit(HybridBlock):
    """Return a recursive encoder-decoder pair"""
    def __init__(self,
                 opts,
                 num_filters,
                 stage,
                 inner_block=None,
                 innermost=False):
        super(EncoderDecoderUnit, self).__init__()

        factor = 2 if stage == 0 else 1
        encoder = EncoderBlock(opts,
                               opts.units[stage],
                               num_filters,
                               trans_block=(stage != 0))
        decoder = DecoderBlock(opts,
                               num_filters,
                               res_block=(not innermost),
                               factor=factor)
        if innermost:
            model = [encoder, decoder]
        else:
            model = [encoder, inner_block, decoder]

        self.net = HybridSequential()
        for block in model:
            self.net.add(block)

    def hybrid_forward(self, F, x, *args, **kwargs):
        """Forward: concatenate the input with the encoder-decoder output (skip connection)"""
        out = F.concat(x, self.net(x))
        return out
Example #3
def __init__(self, layers, filters, extras):
    super(VGGAtrousExtractor, self).__init__(layers, filters)
    '''
    extra_spec = {
    300: [((256, 1, 1, 0), (512, 3, 2, 1)),
          ((128, 1, 1, 0), (256, 3, 2, 1)),
          ((128, 1, 1, 0), (256, 3, 1, 0)),
          ((128, 1, 1, 0), (256, 3, 1, 0))],

    512: [((256, 1, 1, 0), (512, 3, 2, 1)),
          ((128, 1, 1, 0), (256, 3, 2, 1)),
          ((128, 1, 1, 0), (256, 3, 2, 1)),
          ((128, 1, 1, 0), (256, 3, 2, 1)),
          ((128, 1, 1, 0), (256, 4, 1, 1))],
    '''
    # out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
    with self.name_scope():
        self.extras = HybridSequential()
        for i, config in enumerate(extras):
            extra = HybridSequential(prefix='extra%d_' % (i))
            with extra.name_scope():
                for channels, kernel, strides, padding in config:
                    extra.add(
                        Conv2D(channels=channels,
                               kernel_size=kernel,
                               strides=strides,
                               padding=padding,
                               weight_initializer=mx.init.Xavier(
                                   rnd_type='gaussian',
                                   factor_type='out',
                                   magnitude=3),
                               bias_initializer='zeros'))
                    extra.add(Activation('relu'))
            self.extras.add(extra)
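A quick sanity check of the output-size formula in the comment above; the 19x19 input is only an illustrative value for a feature map entering these extras:

import math

def conv_out(size, kernel, stride=1, padding=0, dilation=1):
    # same formula as the comment above
    return math.floor((size + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1

size = conv_out(19, kernel=1, stride=1, padding=0)    # the 1x1/s1/p0 conv keeps 19
size = conv_out(size, kernel=3, stride=2, padding=1)  # the 3x3/s2/p1 conv halves it
print(size)  # 10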
Example #4
class Res_Block(HybridBlock):
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            relu1 = LeakyReLU(alpha=0.2)
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            conv2 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            res_block = [conv1, norm1, relu1, conv2, norm2, relu2]
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)

    def hybrid_forward(self, F, x):
        residual = x
        x = self.res(x)
        x = x + residual
        return x
Example #5
def test_autolog_persists_manually_created_run():
    kiwi.gluon.autolog()

    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")

    with kiwi.start_run() as run:

        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(model.collect_params(),
                          "adam",
                          optimizer_params={
                              "learning_rate": .001,
                              "epsilon": 1e-07
                          })
        est = estimator.Estimator(net=model,
                                  loss=SoftmaxCrossEntropyLoss(),
                                  metrics=Accuracy(),
                                  trainer=trainer)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3)

        assert kiwi.active_run().info.run_id == run.info.run_id
Example #6
def gluon_random_data_run():
    mlflow.gluon.autolog()

    with mlflow.start_run() as run:
        data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
        validation = DataLoader(LogsDataset(),
                                batch_size=128,
                                last_batch="discard")

        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(model.collect_params(),
                          "adam",
                          optimizer_params={
                              "learning_rate": .001,
                              "epsilon": 1e-07
                          })
        est = estimator.Estimator(net=model,
                                  loss=SoftmaxCrossEntropyLoss(),
                                  metrics=Accuracy(),
                                  trainer=trainer)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3, val_data=validation)

    client = mlflow.tracking.MlflowClient()
    return client.get_run(run.info.run_id)
Example #7
def test_autolog_persists_manually_created_run():
    mlflow.gluon.autolog()

    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")

    with mlflow.start_run() as run:

        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(
            model.collect_params(),
            "adam",
            optimizer_params={
                "learning_rate": 0.001,
                "epsilon": 1e-07
            },
        )
        est = get_estimator(model, trainer)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3)

        assert mlflow.active_run().info.run_id == run.info.run_id
Example #8
def test_autolog_registering_model():
    registered_model_name = "test_autolog_registered_model"
    mlflow.gluon.autolog(registered_model_name=registered_model_name)

    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")

    model = HybridSequential()
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()

    trainer = Trainer(model.collect_params(),
                      "adam",
                      optimizer_params={
                          "learning_rate": 0.001,
                          "epsilon": 1e-07
                      })
    est = get_estimator(model, trainer)

    with mlflow.start_run(), warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(data, epochs=3)

        registered_model = MlflowClient().get_registered_model(
            registered_model_name)
        assert registered_model.name == registered_model_name
Example #9
def test_autolog_ends_auto_created_run():
    mlflow.gluon.autolog()

    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")

    model = HybridSequential()
    model.add(Dense(64, activation="relu"))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()

    trainer = Trainer(model.collect_params(),
                      "adam",
                      optimizer_params={
                          "learning_rate": 0.001,
                          "epsilon": 1e-07
                      })
    est = estimator.Estimator(net=model,
                              loss=SoftmaxCrossEntropyLoss(),
                              trainer=trainer,
                              **get_metrics())

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(data, epochs=3)

    assert mlflow.active_run() is None
Example #10
class VGGAtrousBase(HybridBlock):
    def __init__(self, layers, filters):
        super(VGGAtrousBase, self).__init__()
        with self.name_scope():
            '''
            The init_scale below is said to be needed because we use pre-trained
            weights imported from caffe -> caffe's pre-trained model expects
            inputs scaled to the 0 ~ 255 range.
            '''
            init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape(
                (1, 3, 1, 1)) * 255
            self.init_scale = self.params.get_constant('init_scale',
                                                       init_scale)

            # layers : [2, 2, 3, 3, 3], filters [64, 128, 256, 512, 512])
            self.stages = HybridSequential()
            for layer, filter in zip(layers, filters):
                stage = HybridSequential(prefix='')
                with stage.name_scope():
                    for _ in range(layer):
                        stage.add(
                            Conv2D(filter,
                                   kernel_size=3,
                                   padding=1,
                                   weight_initializer=mx.init.Xavier(
                                       rnd_type='gaussian',
                                       factor_type='out',
                                       magnitude=3),
                                   bias_initializer='zeros'))
                        stage.add(Activation('relu'))
                self.stages.add(stage)

            # fc6, fc7 converted to dilated convolution layers - pooling is done in hybrid_forward
            stage = HybridSequential(prefix='dilated_')
            with stage.name_scope():
                # conv6(fc6) - dilated
                stage.add(
                    Conv2D(1024,
                           kernel_size=3,
                           padding=6,
                           dilation=6,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

                # conv7(fc7)
                stage.add(
                    Conv2D(1024,
                           kernel_size=1,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

            self.stages.add(stage)
            self.norm4 = Normalize(n_channel=filters[3], initial=20, eps=1e-5)
Example #11
def get_gluon_random_data_run(log_models=True):
    mlflow.gluon.autolog(log_models)

    with mlflow.start_run() as run:
        data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
        validation = DataLoader(LogsDataset(),
                                batch_size=128,
                                last_batch="discard")

        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(
            model.collect_params(),
            "adam",
            optimizer_params={
                "learning_rate": 0.001,
                "epsilon": 1e-07
            },
        )
        est = get_estimator(model, trainer)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3, val_data=validation)
    client = mlflow.tracking.MlflowClient()
    return client.get_run(run.info.run_id)
Example #12
class AttentionalAggregator(RelationalAggregator):
    def __init__(self, in_units, activation=LeakyReLU(0.2), **kwargs):
        super().__init__(**kwargs)

        with self.name_scope():
            self.coefficient_net = HybridSequential()

            dense = Dense(in_units=in_units,
                          units=1,
                          use_bias=False,
                          flatten=False)
            self.coefficient_net.add(dense)
            self.coefficient_net.add(activation)

        self.softmax = MaskedSoftmax(axis=1, keepdims=True)

    def hybrid_forward(self, F, X, Z, M):
        """ X is the concatenation of source, edge, and target
            features of an edge. Z is the fused representation.
            M is a mask over the neighborhood.
        """
        coefficient = self.coefficient_net(X)
        attention_weight = self.softmax(coefficient, M)

        return F.sum(attention_weight * Z, axis=1)

    def get_args(self, X, Z, M, *args):
        return X, Z, M
Example #13
class _RiseResidualBlock(HybridBlock):  # Too many arguments (7/5)
    """
    Definition of a residual block without any pooling operation
    """

    def __init__(self, channels, bn_mom, act_type, unit_name, use_se=True, res_scale_fac=0.2):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        :param use_se: Whether to add a squeeze-excitation module on top of the block output
        :param res_scale_fac: Scaling factor applied to the residual branch before the shortcut is added
        """
        super(_RiseResidualBlock, self).__init__(unit_name)
        self.act_type = act_type
        self.unit_name = unit_name
        self.res_scale_fac = res_scale_fac
        self.use_se = use_se
        # branch 0
        self.body = HybridSequential()
        self.body.add(
            Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False, prefix="%s_conv0" % unit_name)
        )
        self.body.add(BatchNorm(momentum=bn_mom, prefix="%s_bn0" % self.unit_name))
        self.body.add(get_act(act_type, prefix="%s_%s0" % (unit_name, act_type)))
        self.body.add(
            Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False, prefix="%s_conv1" % unit_name)
        )
        self.body.add(BatchNorm(momentum=bn_mom, prefix="%s_bn1" % self.unit_name))
        self.act0 = get_act(act_type, prefix="%s_%s1" % (unit_name, act_type))

        if use_se:
            self.se0 = _ChannelSqueezeExcitation("%s_se0" % unit_name, channels, 16, act_type)

    def hybrid_forward(self, F, x):
        """
        Compute forward pass

        :param F: Handle
        :param x: Input data to the block
        :return: Activation maps of the block
        """

        shortcut = x
        out = self.body(shortcut)
        # if self.shortcut is True:
        # scale down the output of the residual block activations to stabilize training
        if self.res_scale_fac:
            out = out * self.res_scale_fac
        # connect the shortcut with the residual activations
        out = F.broadcast_add(shortcut, out, name=self.unit_name)
        # apply batchnormalization and activation
        # out = self.bn0(out)
        out = self.act0(out)
        # apply squeeze excitation
        if self.use_se:
            out = self.se0(out)

        return out
Example #14
class AnchorOffstNet(HybridBlock):
    def __init__(self,
                 net=None,
                 version=None,
                 anchors=None,
                 target_size=None,
                 ctx=mx.cpu()):
        super(AnchorOffstNet, self).__init__()
        self._net = net

        features = []
        strides = []
        darknet_output = get_darknet(version,
                                     pretrained=False,
                                     ctx=mx.cpu(),
                                     dummy=True)(mx.nd.random_uniform(
                                         low=0,
                                         high=1,
                                         shape=(1, 3, target_size[0],
                                                target_size[1]),
                                         ctx=mx.cpu()))
        for out in darknet_output:  # feature_14, feature_24, feature_28
            out_height, out_width = out.shape[2:]
            features.append([out_width, out_height])
            strides.append(
                [target_size[1] // out_width, target_size[0] // out_height])

        features = features[::-1]
        strides = strides[::-1]  # in deep -> middle -> shallow order!!!
        anchors = OrderedDict(anchors)
        anchors = list(anchors.values())[::-1]
        self._numoffst = len(anchors)

        with self.name_scope():
            self._anchor_generators = HybridSequential()
            for i, anchor, feature, stride in zip(range(len(features)),
                                                  anchors, features, strides):
                self._anchor_generators.add(
                    YoloAnchorGenerator(i, anchor, feature, stride))

        self._anchor_generators.initialize(ctx=ctx)

    def hybrid_forward(self, F, x):

        output82, output94, output106 = self._net(x)

        anchors = []
        offsets = []
        strides = []
        for i in range(self._numoffst):
            anchor, offset, stride = self._anchor_generators[i](x)
            anchors.append(anchor)
            offsets.append(offset)
            strides.append(stride)

        return output82, output94, output106, \
               anchors[0], anchors[1], anchors[2], \
               offsets[0], offsets[1], offsets[2], \
               strides[0], strides[1], strides[2]
Example #15
class AlphaZeroResnet(HybridBlock):
    def __init__(self,
                 n_labels=2272,
                 channels=256,
                 num_res_blocks=19,
                 value_fc_size=256,
                 bn_mom=0.9,
                 act_type="relu",
                 **kwargs):
        """
        Creates the alpha zero gluon net description based on the given parameters.

        :param n_labels: Number of labels for the policy
        :param channels: Used for all convolution operations. (Except the last 2)
        :param num_res_blocks: Number of residual blocks to stack. In the paper they used 19 or 39 residual blocks
        :param value_fc_size: Fully Connected layer size. Used for the value output
        :param bn_mom: Batch normalization momentum
        :return: gluon net description
        """

        super(AlphaZeroResnet, self).__init__(**kwargs, prefix="")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            self.body.add(
                _StemAlphaZero(name="stem",
                               channels=channels,
                               bn_mom=bn_mom,
                               act_type=act_type))

        for i in range(num_res_blocks):
            unit_name = "unit%d" % (i + 1)
            self.body.add(
                ResidualBlock(channels, bn_mom, act_type, unit_name=unit_name))

        # create the two heads which will be used in the hybrid fwd pass
        self.value_head = _ValueHeadAlphaZero("value", 1, value_fc_size,
                                              bn_mom, act_type)
        self.policy_head = _PolicyHeadAlphaZero("policy", 2, n_labels, bn_mom,
                                                act_type)

    def hybrid_forward(self, F, x):
        """
        Implementation of the forward pass of the full network.
        Runs the shared body, then applies the value and policy heads to its output.
        :param F: Abstract Function handle which works for gluon & mxnet
        :param x: Input to the network
        :return: Value & Policy Output
        """
        out = self.body(x)

        value = self.value_head(out)
        policy = self.policy_head(out)

        return [value, policy]
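A hedged end-to-end sketch (assumes the repo's _StemAlphaZero, ResidualBlock and head blocks are importable; the 34-plane 8x8 input is just an illustrative board encoding):

import mxnet as mx

net = AlphaZeroResnet(n_labels=2272, channels=256, num_res_blocks=19)
net.initialize()
board = mx.nd.random.uniform(shape=(1, 34, 8, 8))
value, policy = net(board)
print(policy.shape)  # (1, 2272); value is a scalar per position in the AlphaZero design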
Example #16
class ResidualBlock(HybridBlock):
    """
    Definition of a residual block without any pooling operation
    """
    def __init__(self, channels, bn_mom, act_type, unit_name):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used for description (string))
        """
        super(ResidualBlock, self).__init__()
        self.act_type = act_type
        self.unit_name = unit_name

        self.body = HybridSequential()

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv0" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn0" % self.unit_name))
        self.body.add(
            Activation(self.act_type,
                       prefix="%s_%s0" % (self.unit_name, self.act_type)))

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv1" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn1" % self.unit_name))

    def hybrid_forward(self, F, x):
        """
        Implementation of the forward pass of the residual block.
        Uses a broadcast add operation for the shortcut and the output of the residual block
        :param F: Abstract Function handle which works for gluon & mxnet
        :param x: Input to the ResidualBlock
        :return: Sum of the shortcut and the computed residual block computation
        """
        shortcut = x
        out = self.body(shortcut)
        out = F.Activation(shortcut + out,
                           act_type=self.act_type,
                           name="%s_BroadcastAdd_%s" %
                           (self.unit_name, self.act_type))

        return out
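A minimal shape check for this block (a sketch assuming mxnet and the gluon.nn imports this example uses):

import mxnet as mx

block = ResidualBlock(channels=256, bn_mom=0.9, act_type='relu', unit_name='unit1')
block.initialize()
x = mx.nd.random.uniform(shape=(1, 256, 8, 8))
print(block(x).shape)  # (1, 256, 8, 8): the 3x3 convs with padding 1 preserve the input shape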
Example #17
class DefaultRouter(HybridBlock):
    def __init__(self, num_experts):
        super(DefaultRouter, self).__init__()
        with self.name_scope():
            self.body = HybridSequential(prefix='')
            self.body.add(GlobalAvgPool2D())
            # self.body.add(Dense(num_experts//4, activation='relu'))
            self.body.add(Dense(num_experts, activation='sigmoid'))

    def hybrid_forward(self, F, x):
        return self.body(x)
Example #18
def features(A, in_units, hidden_layer):
    features = HybridSequential()
    with features.name_scope():
        for layer_size, activation_func in hidden_layer:
            layer = GraphConvolution(A,
                                     in_units=in_units,
                                     out_units=layer_size,
                                     activation=activation_func)
            features.add(layer)
            in_units = layer_size

    return features, in_units
Example #19
class FirstBlock(HybridBlock):
    """Return FirstBlock for building DenseNet"""
    def __init__(self, opts):
        super(FirstBlock, self).__init__()
        self.fblock = HybridSequential()
        self.fblock.add(Conv3D(channels=opts.init_channels, kernel_size=(opts.zKernelSize, 7, 7),
                               strides=(opts.zStride, 1, 1), padding=(opts.zPad, 3, 3), use_bias=opts.use_bias))

        # self.fblock.add(BatchNorm())
        # self.fblock.add(Activation(opts.activation))

    def hybrid_forward(self, F, x, *args, **kwargs):
        return self.fblock(x)
Example #20
class UnetSkipUnit(HybridBlock):
    def __init__(self, inner_channels, outer_channels, inner_block=None, innermost=False, outermost=False,
                 use_dropout=False, use_bias=False):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels, kernel_size=4, strides=2, padding=1,
                             in_channels=outer_channels, use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels, use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels, kernel_size=4, strides=2, padding=1,
                                          in_channels=inner_channels * 2, use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)

    def hybrid_forward(self, F, x):  # for a HybridBlock, hybrid_forward is the forward function
        """

        :param F: either mx.nd or mx.sym, depending on whether the block is hybridized
        :param x: input data
        :return:
        """
        if self.outermost:
            return self.model(x)
        else:
            return F.concat(self.model(x), x, dim=1)   # concatenate the input features with the network output, as in the U-Net skip connections
Example #21
class _ChannelSqueezeExcitation(HybridBlock):
    def __init__(self, name, nb_act_maps, ratio=16, act_type="relu"):

        super(_ChannelSqueezeExcitation, self).__init__(prefix=name)

        self.nb_act_maps = nb_act_maps
        self.body = HybridSequential(prefix="")

        nb_units_hidden = nb_act_maps // ratio
        with self.name_scope():
            self.body.add(AvgPool2D(pool_size=8))

            self.body.add(Dense(nb_units_hidden))
            self.body.add(get_act(act_type))
            self.body.add(Dense(nb_act_maps))
            self.body.add(get_act("sigmoid"))

    def hybrid_forward(self, F, x):
        """
        Compute forward pass

        :param F: Handle
        :param x: Input data to the block
        :return: Activation maps of the block
        """
        feature_scaling = self.body(x)
        out = F.broadcast_mul(
            x,
            F.reshape(data=feature_scaling,
                      shape=(-1, self.nb_act_maps, 1, 1)))

        return out
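A minimal sketch of the channel-wise gating (assumes this repo's get_act helper; note that AvgPool2D(pool_size=8) hard-codes 8x8 activation maps, e.g. a chess board):

import mxnet as mx

se = _ChannelSqueezeExcitation('se0', nb_act_maps=256, ratio=16)
se.initialize()
x = mx.nd.random.uniform(shape=(1, 256, 8, 8))
print(se(x).shape)  # (1, 256, 8, 8): each channel is rescaled by a learned gate in (0, 1)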
Example #22
class TransitionBlock(HybridBlock):
    """Return TransitionBlock Unit for building DenseNet
    Parameters
    ----------
    num_stage : int
        Number of stage
    num_filters : int
        Number of output channels
    """
    def __init__(self, opts, num_filters, pool_type='avg'):
        super(TransitionBlock, self).__init__()
        self.pool_type = pool_type
        self.tblock = HybridSequential()
        self.tblock.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
        self.tblock.add(Activation(opts.activation))
        self.tblock.add(
            Conv2D(channels=int(num_filters * opts.reduction),
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   use_bias=opts.use_bias,
                   padding=(0, 0)))
        if opts.drop_out > 0:
            self.tblock.add(Dropout(opts.drop_out))

    def hybrid_forward(self, F, x, *args, **kwargs):
        """Forward"""
        return F.Pooling(self.tblock(x),
                         global_pool=False,
                         kernel=(2, 2),
                         stride=(2, 2),
                         pool_type=self.pool_type)
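A minimal shape sketch; the opts object below is a hypothetical stand-in for the fields TransitionBlock reads:

from types import SimpleNamespace
import mxnet as mx

opts = SimpleNamespace(bn_mom=0.9, bn_eps=1e-5, activation='relu',
                       use_bias=False, reduction=0.5, drop_out=0.0)
block = TransitionBlock(opts, num_filters=64)
block.initialize()
x = mx.nd.random.uniform(shape=(1, 64, 16, 16))
print(block(x).shape)  # (1, 32, 8, 8): the 1x1 conv halves channels, the 2x2/s2 pooling halves H and W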
Example #23
class AnchorNet(HybridBlock):

    def __init__(self, net=None, version=None, target_size=None,
                 anchor_sizes=[32, 64, 128, 256, 512],
                 anchor_size_ratios=[1, pow(2, 1 / 3), pow(2, 2 / 3)],
                 anchor_aspect_ratios=[0.5, 1, 2],
                 box_offset=(0.5, 0.5),
                 anchor_box_clip=True, ctx=mx.cpu()):
        super(AnchorNet, self).__init__()

        self._net = net
        if version not in [0, 1, 2, 3, 4, 5, 6]:
            raise ValueError

        feature_sizes = []
        bifpn = get_bifpn(version, ctx=mx.cpu(), dummy=True)(
            mx.nd.random_uniform(low=0, high=1, shape=(1, 3, target_size[0], target_size[1]), ctx=mx.cpu()))
        for bif in bifpn:
            feature_sizes.append(bif.shape[2:])  # h, w

        # Unless get_fpn_resnet is passed in from outside, we have to run one
        # forward pass anyway; because the network is not fully defined up front
        # (default init), this throwaway code has to be added.
        with self.name_scope():

            # The two lines below must be inside self.name_scope(), since they build
            # a new network that generates the anchors!!!
            # Outside self.name_scope(), their names would collide with the existing self._net.

            self._bifpn = get_bifpn(version, ctx=ctx, dummy=True)
            self._bifpn.forward(mx.nd.ones(shape=(1, 3) + target_size, ctx=ctx))
            self._anchor_generators = HybridSequential()

            for index, feature_size, anchor_size in zip(range(len(feature_sizes)), feature_sizes, anchor_sizes):
                self._anchor_generators.add(EfficientAnchorGenerator(index=index,
                                                                     input_size=target_size,
                                                                     feature_size=feature_size,
                                                                     anchor_size=anchor_size,
                                                                     anchor_size_ratios=anchor_size_ratios,
                                                                     anchor_aspect_ratios=anchor_aspect_ratios,
                                                                     box_offset=box_offset,
                                                                     box_clip=anchor_box_clip))

        self._anchor_generators.initialize(ctx=ctx)

    def hybrid_forward(self, F, x):
        cls_preds, box_preds = self._net(x)
        anchors = [anchor_generator(bifpn_feature) for bifpn_feature, anchor_generator in
                   zip(self._bifpn(x), self._anchor_generators)]
        anchors = F.reshape(F.concat(*anchors, dim=0), shape=(1, -1, 4))
        return cls_preds, box_preds, anchors
Example #24
class decoder(HybridBlock):
    def __init__(self, in_channel, out_channel):
        super(decoder, self).__init__()
        with self.name_scope():
            de_conv = Conv2DTranspose(channels=out_channel, kernel_size=4, strides=2, padding=1,
                                      in_channels=in_channel)
            norm = BatchNorm(momentum=0.1, in_channels=out_channel)
            relu = LeakyReLU(alpha=0.2)
        decode = [de_conv, norm, relu]
        self.decoder = HybridSequential()
        with self.decoder.name_scope():
            for block in decode:
                self.decoder.add(block)

    def hybrid_forward(self, F, x):
        return self.decoder(x)
Example #25
def build_model(A, X):
    model = HybridSequential()

    with model.name_scope():
        features, out_units = build_features(A, X)
        model.add(features)
        logger.info("GCN Summary: \n{}".format(model))

        classifier = LogisticRegressor(out_units)
        model.add(classifier)
        logger.info("GCN + LR Summary: \n{}".format(model))

    model.hybridize()
    model.initialize(Uniform(1))

    return model, features
Example #26
class label_Discriminator(HybridBlock):
    def __init__(self, in_channels, ndf=1, n_layers=3, use_sigmoid=False, use_bias=False):
        super(label_Discriminator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 70
            padding = 24
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=8,
                                  padding=padding, in_channels=in_channels, use_bias=use_bias))
            if use_sigmoid:
                self.model.add(Activation(activation='sigmoid'))

    def hybrid_forward(self, F, x):
        out = self.model(x)
        return out
Example #27
def build_model(A, X):
    model = HybridSequential()
    hidden_layer_specs = [(4, 'tanh'), (2, 'tanh')]
    in_units = X.shape[1]

    with model.name_scope():
        features, out_units = build_features(A, X)
        model.add(features)

        classifier = LogisticRegressor(out_units)
        model.add(classifier)

    model.hybridize()
    model.initialize(Uniform(1))

    return model, features
Example #28
def build_features(A, X):
    # Format: (units in layer, activation function)
    hidden_layer_specs = [(4, 'tanh'), (2, 'tanh')]
    in_units = X.shape[1]

    features = HybridSequential()
    with features.name_scope():
        for i, (layer_size, activation_func) in enumerate(hidden_layer_specs):
            layer = SpectralRule(A,
                                 in_units=in_units,
                                 out_units=layer_size,
                                 activation=activation_func)
            features.add(layer)

            in_units = layer_size
    return features, in_units
Example #29
class UnetSkipUnit(HybridBlock):

    def __init__(self, inner_channels, outer_channels, inner_blocks=None, inner_most=False, outer_most=False,
                 use_dropout=False, use_bias=False):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outer_most = outer_most
            en_conv = Conv2D(channels=inner_channels, in_channels=outer_channels, kernel_size=4, strides=2, padding=1,
                             use_bias=use_bias)
            en_relu = LeakyReLU(0.2)
            en_norm = BatchNorm(momentum=.1, in_channels=inner_channels)
            de_relu = Activation('relu')
            de_norm = BatchNorm(momentum=.1, in_channels=outer_channels)

            if inner_most:
                de_conv = Conv2DTranspose(channels=outer_channels, in_channels=inner_channels, kernel_size=4, strides=2,
                                          padding=1, use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv]
                model = encoder + decoder
            elif outer_most:
                de_conv = Conv2DTranspose(channels=outer_channels, in_channels=inner_channels * 2, kernel_size=4,
                                          strides=2, padding=1, use_bias=use_bias)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation('tanh')]
                model = encoder + [inner_blocks] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels, in_channels=inner_channels * 2, kernel_size=4,
                                          strides=2, padding=1, use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_blocks] + decoder

            if use_dropout:
                model += [Dropout(0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)

    def hybrid_forward(self, F, x, *args, **kwargs):
        if self.outer_most:
            return self.model(x)
        else:
            return F.concat(self.model(x), x, dim=1)
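A hedged construction sketch showing how these units nest into a small U-Net (channel counts chosen so each inner block feeds its parent inner_channels * 2 after the concat):

import mxnet as mx

innermost = UnetSkipUnit(256, 128, inner_most=True)
middle = UnetSkipUnit(128, 64, inner_blocks=innermost)
unet = UnetSkipUnit(64, 3, inner_blocks=middle, outer_most=True)
unet.initialize()
x = mx.nd.random.uniform(shape=(1, 3, 32, 32))
print(unet(x).shape)  # (1, 3, 32, 32): the outermost unit returns the decoded image directly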
Example #30
class CNN(HybridBlock):
    def __init__(self, training=False, **kwargs):
        super(CNN, self).__init__(**kwargs)
        self.cnn = HybridSequential()
        self.cnn.add( # We don't need pooling, since local information matters
            Conv2D(channels=384, kernel_size=3, padding=1, activation='relu'), # Sees 3*3
            Conv2D(channels=256, kernel_size=3, padding=1, activation='relu'), # Sees 5*5
            Dense(units=1024, activation='relu'),
            Dropout(0.2 if training else 0.0),
            Dense(units=512, activation='relu'),
            Dropout(0.2 if training else 0.0),
            Dense(units=256, activation='relu'),
            Dense(1))
        self.cnn.hybridize()

    def hybrid_forward(self, F, x):
        return self.cnn(x)
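A quick check of the "Sees 3*3 / Sees 5*5" comments above: with stride-1 convolutions the receptive field grows by kernel_size - 1 per layer.

rf = 1
for k in (3, 3):  # the two Conv2D layers above
    rf += k - 1
print(rf)  # 5: each unit after the second conv sees a 5x5 patch of the input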