Example #1
    def __init__(self, num_dev, num_classes=1000, **kwargs):
        super(Xception, self).__init__(**kwargs)

        with self.name_scope():
            self.features = HybridSequential(prefix='')

            # entry flow
            for i in range(2):
                self.features.add(
                    nn.Conv2D(channels=32 * (i + 1),
                              kernel_size=3,
                              padding=0,
                              strides=2 if i == 0 else 1,
                              use_bias=False))
                self.features.add(SyncBatchNorm(num_devices=num_dev))
                self.features.add(nn.Activation('relu'))

            channels = [64, 128, 256, 728]
            for i in range(len(channels) - 1):
                self.features.add(
                    XceptionModule(channels=channels[i + 1],
                                   in_channels=channels[i],
                                   num_dev=num_dev,
                                   pre_relu=(i != 0),
                                   down=True,
                                   prefix='block{}_'.format(i + 2)))

            # middle flow
            for i in range(8):
                self.features.add(
                    XceptionModule(channels=728,
                                   in_channels=728,
                                   num_dev=num_dev,
                                   pre_relu=True,
                                   down=False,
                                   prefix='block{}_'.format(i + 5)))

            # exit flow
            self.features.add(
                XceptionExitModule(out_channels=1024,
                                   mid_channels=728,
                                   in_channels=728,
                                   num_dev=num_dev,
                                   pre_relu=True,
                                   down=True,
                                   prefix='block13_'))
            self.features.add(
                XceptionExitModule(out_channels=2048,
                                   mid_channels=1536,
                                   in_channels=1024,
                                   num_dev=num_dev,
                                   pre_relu=False,
                                   down=False,
                                   prefix='block14_'))
            self.features.add(nn.Activation('relu'))

            self.output = HybridSequential(prefix='')
            self.output.add(nn.GlobalAvgPool2D())
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(num_classes))
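
The constructor above only wires up self.features and self.output; the example omits the forward pass. A minimal sketch of the companion hybrid_forward (an assumption, not shown in the source) would be:

    def hybrid_forward(self, F, x):
        # entry, middle and exit flows, then the classifier head
        x = self.features(x)
        return self.output(x)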
Example #2
    def __init__(self,
                 base=18,
                 heads=OrderedDict(),
                 head_conv_channel=64,
                 pretrained=True,
                 root=os.path.join(os.getcwd(), 'models'),
                 use_dcnv2=False,
                 ctx=mx.cpu()):
        super(CenterNet, self).__init__()

        with self.name_scope():
            self._base_network = get_upconv_resnet(base=base,
                                                   pretrained=pretrained,
                                                   root=root,
                                                   use_dcnv2=use_dcnv2,
                                                   ctx=ctx)
            self._heads = HybridSequential('heads')
            for name, values in heads.items():
                head = HybridSequential(name)
                num_output = values['num_output']
                bias = values.get('bias', 0.0)
                head.add(
                    Conv2D(head_conv_channel,
                           kernel_size=(3, 3),
                           padding=(1, 1),
                           use_bias=True))
                head.add(Activation('relu'))
                head.add(
                    Conv2D(num_output,
                           kernel_size=(1, 1),
                           use_bias=True,
                           bias_initializer=mx.init.Constant(bias)))
                self._heads.add(head)
        self._heads.initialize(ctx=ctx)
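
All the heads built above consume one shared feature map; the example stops at construction. A hedged sketch of the forward pass (assumed, not shown in the source):

    def hybrid_forward(self, F, x):
        # backbone with upsampling, then one output per head (heatmap, wh, offset, ...)
        feat = self._base_network(x)
        return [head(feat) for head in self._heads]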
Example #3
 def __init__(self,
              channels,
              in_channels,
              num_dev=1,
              pre_relu=True,
              down=True,
              **kwargs):
     super(XceptionModule, self).__init__(**kwargs)
     with self.name_scope():
         self.body = HybridSequential(prefix='body_')
         if pre_relu:
             self.body.add(nn.Activation('relu'))
         self.body.add(_make_separable_conv3(channels, in_channels))
         self.body.add(SyncBatchNorm(num_devices=num_dev))
         self.body.add(nn.Activation('relu'))
         self.body.add(_make_separable_conv3(channels, channels))
         self.body.add(SyncBatchNorm(num_devices=num_dev))
         if down:
             self.body.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
             self.downsample = HybridSequential(prefix='downsample_')
             with self.downsample.name_scope():
                 self.downsample.add(
                     nn.Conv2D(channels,
                               kernel_size=1,
                               strides=2,
                               use_bias=False))
                 self.downsample.add(SyncBatchNorm(num_devices=num_dev))
         else:
             self.body.add(nn.Activation('relu'))
             self.body.add(_make_separable_conv3(channels, channels))
             self.body.add(SyncBatchNorm(num_devices=num_dev))
             self.downsample = None
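
Since self.downsample mirrors the strided max-pool inside the body, the skip connection presumably looks like the following sketch (assumed; the module's hybrid_forward is not shown):

    def hybrid_forward(self, F, x):
        residual = self.downsample(x) if self.downsample is not None else x
        return self.body(x) + residual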
Example #4
    def __init__(self,
                 opts,
                 num_filters,
                 stage,
                 inner_block=None,
                 innermost=False):
        super(EncoderDecoderUnit, self).__init__()

        factor = 2 if stage == 0 else 1
        encoder = EncoderBlock(opts,
                               opts.units[stage],
                               num_filters,
                               trans_block=(stage != 0))
        decoder = DecoderBlock(opts,
                               num_filters,
                               res_block=(not innermost),
                               factor=factor)
        if innermost:
            model = [encoder, decoder]
        else:
            model = [encoder, inner_block, decoder]

        self.net = HybridSequential()
        for block in model:
            self.net.add(block)

        if opts.dense_forward:
            self.dense_forward = HybridSequential()
            self.dense_forward.add(DenseBlock(opts, opts.units[stage]))
        else:
            self.dense_forward = None
Example #5
 def __init__(self, layers, filters, extras):
     super(VGGAtrousExtractor, self).__init__(layers, filters)
     '''
     extra_spec = {
     300: [((256, 1, 1, 0), (512, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 1, 0)),
           ((128, 1, 1, 0), (256, 3, 1, 0))],
 
     512: [((256, 1, 1, 0), (512, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 3, 2, 1)),
           ((128, 1, 1, 0), (256, 4, 1, 1))],
      }
      '''
     # out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
     with self.name_scope():
         self.extras = HybridSequential()
         for i, config in enumerate(extras):
             extra = HybridSequential(prefix='extra%d_' % (i))
             with extra.name_scope():
                 for channels, kernel, strides, padding in config:
                     extra.add(
                         Conv2D(channels=channels,
                                kernel_size=kernel,
                                strides=strides,
                                padding=padding,
                                weight_initializer=mx.init.Xavier(
                                    rnd_type='gaussian',
                                    factor_type='out',
                                    magnitude=3),
                                bias_initializer='zeros'))
                     extra.add(Activation('relu'))
             self.extras.add(extra)
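
As a sanity check of the out_height formula quoted in the comment above, here is a worked example for one stride-2 extra layer; the input height of 64 is illustrative, not from the source:

    height, padding, dilation, kernel, stride = 64, 1, 1, 3, 2
    out_height = (height + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1
    print(out_height)  # 32 -- each stride-2 extra halves the spatial size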
Example #6
    def __init__(self, layers, filters):
        super(VGGAtrousBase, self).__init__()
        with self.name_scope():
            '''
            Because the pre-trained weights were imported from Caffe, the
            init_scale below is needed: Caffe's pre-trained model expects
            inputs on a 0-255 scale.
            '''
            init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape(
                (1, 3, 1, 1)) * 255
            self.init_scale = self.params.get_constant('init_scale',
                                                       init_scale)

            # layers : [2, 2, 3, 3, 3], filters [64, 128, 256, 512, 512])
            self.stages = HybridSequential()
            for layer, filter in zip(layers, filters):
                stage = HybridSequential(prefix='')
                with stage.name_scope():
                    for _ in range(layer):
                        stage.add(
                            Conv2D(filter,
                                   kernel_size=3,
                                   padding=1,
                                   weight_initializer=mx.init.Xavier(
                                       rnd_type='gaussian',
                                       factor_type='out',
                                       magnitude=3),
                                   bias_initializer='zeros'))
                        stage.add(Activation('relu'))
                self.stages.add(stage)

            # fc6, fc7 converted to dilated convolution layers - pooling is done in hybrid_forward
            stage = HybridSequential(prefix='dilated_')
            with stage.name_scope():
                # conv6(fc6) - dilated
                stage.add(
                    Conv2D(1024,
                           kernel_size=3,
                           padding=6,
                           dilation=6,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

                # conv7(fc7)
                stage.add(
                    Conv2D(1024,
                           kernel_size=1,
                           weight_initializer=mx.init.Xavier(
                               rnd_type='gaussian',
                               factor_type='out',
                               magnitude=3),
                           bias_initializer='zeros'))
                stage.add(Activation('relu'))

            self.stages.add(stage)
            self.norm4 = Normalize(n_channel=filters[3], initial=20, eps=1e-5)
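
The init_scale constant exists because the Caffe weights were trained on 0-255 inputs, so a 0-1 normalized image has to be scaled back up per channel. A minimal sketch of how it would be consumed (assumed; the source defers the forward pass, including the pooling between stages, to hybrid_forward, which is not shown):

    def hybrid_forward(self, F, x, init_scale):
        # rescale each RGB channel back to the 0-255 range of the Caffe weights
        x = F.broadcast_mul(x, init_scale)
        for stage in self.stages:
            x = stage(x)
        return x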
Example #7
    def __init__(
        self,
        name,
        in_ch,
        ch_0_0=192,
        ch_1_0=128,
        ch_1_1=224,
        ch_1_2=256,
        ch=2144,
        bn_mom=0.9,
        act_type="relu",
        res_scale_fac=0.2,
        use_se=True,
        shortcut=True,
    ):
        """
        Definition of the InceptionResnetC block

        :param name: name prefix for all blocks
        :param in_ch: number of input channels to the block
        :param ch_0_0: Number of channels for 1st conv operation in branch 0
        :param ch_1_0: Number of channels for 1st conv operation in branch 1
        :param ch_1_1: Number of channels for 2nd conv operation in branch 1
        :param ch_1_2: Number of channels for 3rd conv operation in branch 1
        :param ch: Number of channels for conv operation after concatenating branches (no act is applied here)
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param res_scale_fac: Constant multiplicative scalar applied to the residual activation maps
        :param use_se: Whether to use a squeeze-excitation module
        :param shortcut: Whether to add the identity shortcut to the block output
        """
        super(_InceptionResnetC, self).__init__(name, ch, res_scale_fac, act_type, bn_mom, use_se, shortcut)
        self.res_scale_fac = res_scale_fac
        self.block_name = name
        self.body = HybridSequential(prefix="")
        self.branches = HybridConcurrent(axis=1, prefix="")  # entry point for all branches
        # branch 0 of block type C
        self.b_0 = HybridSequential()
        self.b_0.add(Conv2D(channels=ch_0_0, kernel_size=(1, 1), prefix="%s_0_conv0" % name, in_channels=in_ch))
        self.b_0.add(get_act(act_type, prefix="%s_0_%s0" % (name, act_type)))
        # branch 1 of block type C
        self.b_1 = HybridSequential()
        self.b_1.add(Conv2D(channels=ch_1_0, kernel_size=(1, 1), prefix="%s_1_conv0" % name, in_channels=in_ch))
        self.b_1.add(get_act(act_type, prefix="%s_2_%s0" % (name, act_type)))
        self.b_1.add(
            Conv2D(channels=ch_1_1, kernel_size=(1, 3), padding=(0, 1), prefix="%s_1_conv1" % name, in_channels=ch_1_0)
        )
        self.b_1.add(get_act(act_type, prefix="%s_2_%s1" % (name, act_type)))
        self.b_1.add(
            Conv2D(channels=ch_1_2, kernel_size=(3, 1), padding=(1, 0), prefix="%s_1_conv2" % name, in_channels=ch_1_1)
        )
        self.b_1.add(get_act(act_type, prefix="%s_1_%s2" % (name, act_type)))
        # concatenate all branches and add them to the body
        self.branches.add(self.b_0)
        self.branches.add(self.b_1)
        self.body.add(self.branches)
        # apply a single CNN layer without activation function
        self.body.add(
            Conv2D(
                channels=ch, kernel_size=(1, 1), prefix="%s_conv0" % name, in_channels=ch_0_0 + ch_1_2, use_bias=False
            )
        )
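
Given that the docstring says res_scale_fac scales the residual activation maps, a plausible forward for this block (an assumed sketch, not the source's base-class implementation) is:

    def hybrid_forward(self, F, x):
        # damp the concatenated-branch output, then add the identity shortcut
        return x + self.body(x) * self.res_scale_fac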
Example #8
    def __init__(self,
                 innerblock=None,
                 outer_channels=32,
                 inner_channels=64,
                 use_bias=False):
        super(middlelayer, self).__init__()
        with self.name_scope():
            res_block_1 = Res_Block(outer_channels=outer_channels)
            res_block_2 = Res_Block(outer_channels=inner_channels)
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)

            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)
            de_conv = Conv2DTranspose(channels=outer_channels,
                                      kernel_size=4,
                                      strides=2,
                                      padding=1,
                                      in_channels=inner_channels,
                                      use_bias=use_bias)
            self.p_at = CA_M5(in_channel=inner_channels)
            self.c_at = CA_M4()

            res_block_3 = Res_Block(outer_channels=inner_channels)
            res_block_4 = Res_Block(outer_channels=outer_channels)
            res1 = res_block_1
            encoder = [en_conv, en_norm, en_relu]
            res2 = res_block_2
            res3 = res_block_3
            decoder = [de_conv, de_norm, de_relu]
            res4 = res_block_4

            self.encoder = HybridSequential()
            with self.encoder.name_scope():
                for block in encoder:
                    self.encoder.add(block)

            self.inner_block = innerblock

            self.res1 = res1
            self.res2 = res2
            self.res3 = res3
            self.res4 = res4

            self.decoder = HybridSequential()

            with self.decoder.name_scope():
                for block in decoder:
                    self.decoder.add(block)
Example #9
 def __init__(self, training=False, **kwargs):
     super(MLP, self).__init__(**kwargs)
     self.layer1 = HybridSequential()
     self.layer1.add(Dense(1024, in_units=25*25, activation="relu"),
                     Dropout(0.1 if training else 0.0))
     self.layer2 = HybridSequential()
     self.layer2.add(Dense(512, activation="relu"),
                     Dropout(0.1 if training else 0.0))
     self.layer3 = Dense(256, activation="relu")
     self.output = Dense(1)
     self.hybridize()
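
A plausible hybrid_forward chaining the four blocks defined above (assumed; the example shows only the constructor):

    def hybrid_forward(self, F, x):
        x = self.layer1(x)      # 625 -> 1024, dropout active only when training
        x = self.layer2(x)      # 1024 -> 512
        x = self.layer3(x)      # 512 -> 256
        return self.output(x)   # single regression output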
Example #10
 def __init__(self,
              network,
              add_filters,
              norm_layer=BatchNorm,
              norm_kwargs=None,
              use_bn=False,
              reduce_ratio=1.0,
              min_depth=128,
              **kwargs):
     super(ResNetV1bSSD, self).__init__()
     assert network.endswith('v1b')
     if norm_kwargs is None:
         norm_kwargs = {}
     res = get_model(network, **kwargs)
     weight_init = mx.init.Xavier(rnd_type='gaussian',
                                  factor_type='out',
                                  magnitude=2)
     with self.name_scope():
         self.stage1 = HybridSequential('stage1')
         for l in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2']:
             self.stage1.add(getattr(res, l))
         self.stage2 = HybridSequential('stage2')
         self.stage2.add(res.layer3)
         # set stride from (2, 2) -> (1, 1) in first conv of layer3
         self.stage2[0][0].conv1._kwargs['stride'] = (1, 1)
          # also the residual path
         self.stage2[0][0].downsample[0]._kwargs['stride'] = (1, 1)
         self.stage2.add(res.layer4)
         self.more_stages = HybridSequential('more_stages')
         for i, num_filter in enumerate(add_filters):
             stage = HybridSequential('more_stages_' + str(i))
             num_trans = max(min_depth,
                             int(round(num_filter * reduce_ratio)))
             stage.add(
                 Conv2D(channels=num_trans,
                        kernel_size=1,
                        use_bias=not use_bn,
                        weight_initializer=weight_init))
             if use_bn:
                 stage.add(norm_layer(**norm_kwargs))
             stage.add(Activation('relu'))
             padding = 0 if i == len(add_filters) - 1 else 1
             stage.add(
                 Conv2D(channels=num_filter,
                        kernel_size=3,
                        strides=2,
                        padding=padding,
                        use_bias=not use_bn,
                        weight_initializer=weight_init))
             if use_bn:
                 stage.add(norm_layer(**norm_kwargs))
             stage.add(Activation('relu'))
             self.more_stages.add(stage)
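
SSD feature extractors conventionally return one feature map per detection scale, so a hedged sketch of this class's forward pass (assumed; not shown in the source) would collect each stage's output:

    def hybrid_forward(self, F, x):
        outputs = []
        x = self.stage1(x)
        outputs.append(x)           # backbone features through layer2
        x = self.stage2(x)
        outputs.append(x)           # layer3 (stride patched to 1) and layer4
        for stage in self.more_stages:
            x = stage(x)
            outputs.append(x)       # progressively smaller extra maps
        return outputs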
Example #11
 def __init__(self):
     super(net2,self).__init__()
     self.net=HybridSequential()       
     with self.net.name_scope():
         
         self.net.add(encoder(3,16))
         self.net.add(encoder(16,32))
     self.att= CA_M2(32)
     self.net1=HybridSequential()
     with self.net1.name_scope():      
         self.net1.add(encoder(32,64))
         self.net1.add(decoder(64,32))
         self.net1.add(decoder(32,16))
         self.net1.add(decoder(16,1))
Example #12
    def __init__(self, name, in_ch, ch, res_scale_fac, act_type, bn_mom, use_se, shortcut, pool_type):
        super(_RiseBlockB, self).__init__(name, ch, res_scale_fac, act_type, bn_mom, use_se, shortcut)

        self.body = HybridSequential(prefix='')

        # entry point for all branches
        self.branches = HybridConcurrent(axis=1, prefix='')

        ch_0_0 = 32
        ch_0_1 = 96
        ch_0_2 = 96

        ch_1_0 = 32
        ch_1_1 = 96
        ch_1_2 = 96

        ch_2_0 = 192

        with self.name_scope():
            # branch 0
            self.b_0 = HybridSequential()
            self.b_0.add(get_pool(pool_type, pool_size=(2, 2), strides=(2, 2)))
            self.b_0.add(Conv2D(channels=ch_0_0, kernel_size=(1, 1), in_channels=in_ch))
            self.b_0.add(get_act(act_type))
            self.b_0.add(
                Conv2D(channels=ch_0_1, kernel_size=(3, 1), padding=(0, 1), in_channels=ch_0_0, use_bias=False))
            self.b_0.add(
                Conv2D(channels=ch_0_2, kernel_size=(1, 3), padding=(1, 0), in_channels=ch_0_1, use_bias=False))
            self.b_0.add(_UpsampleBlock('upsample0', scale=2))

            # branch 1
            self.b_1 = HybridSequential()
            self.b_1.add(Conv2D(channels=ch_1_0, kernel_size=(1, 1), in_channels=in_ch))
            self.b_1.add(get_act(act_type))
            self.b_1.add(
                Conv2D(channels=ch_1_1, kernel_size=(3, 1), padding=(0, 1), in_channels=ch_1_0, use_bias=False))
            self.b_1.add(
                Conv2D(channels=ch_1_2, kernel_size=(1, 3), padding=(1, 0), in_channels=ch_1_1, use_bias=False))

            # branch 2
            self.b_2 = HybridSequential()
            self.b_2.add(Conv2D(channels=ch_2_0, kernel_size=(1, 1), in_channels=in_ch, use_bias=False))

            # concatenate all branches and add them to the body
            self.branches.add(self.b_0)
            self.branches.add(self.b_1)
            self.branches.add(self.b_2)
            self.body.add(self.branches)
Example #13
def test_autolog_ends_auto_created_run():
    mlflow.gluon.autolog()

    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")

    model = HybridSequential()
    model.add(Dense(64, activation="relu"))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()

    trainer = Trainer(model.collect_params(),
                      "adam",
                      optimizer_params={
                          "learning_rate": 0.001,
                          "epsilon": 1e-07
                      })
    est = estimator.Estimator(net=model,
                              loss=SoftmaxCrossEntropyLoss(),
                              trainer=trainer,
                              **get_metrics())

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(data, epochs=3)

    assert mlflow.active_run() is None
Example #14
def gluon_model(model_data):
    train_data, train_label, _ = model_data
    train_data_loader = DataLoader(list(zip(train_data, train_label)),
                                   batch_size=128,
                                   last_batch="discard")
    model = HybridSequential()
    model.add(Dense(128, activation="relu"))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()
    trainer = Trainer(model.collect_params(),
                      "adam",
                      optimizer_params={
                          "learning_rate": .001,
                          "epsilon": 1e-07
                      })
    est = estimator.Estimator(net=model,
                              loss=SoftmaxCrossEntropyLoss(),
                              metrics=Accuracy(),
                              trainer=trainer)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(train_data_loader, epochs=3)
    return model
Example #15
    def __init__(self, name, channels=1, fc0=256, bn_mom=0.9, act_type="relu"):
        """
        Definition of the value head. Same as the AlphaZero authors' version, but with the order of BatchNorm and ReLU swapped.

        :param name: name prefix for all blocks
        :param channels: Number of channels for 1st conv operation in branch 0
        :param fc0: Number of units in Dense/Fully-Connected layer
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param se_type: SqueezeExcitation type; choose one of [None, "cSE", "sSE", "csSE"] for no squeeze-excitation,
        channel-wise, spatial-wise, or channel-spatial squeeze-excitation, respectively
        """

        super(_ValueHeadRise, self).__init__(prefix=name + "_")

        self.body = HybridSequential(prefix="")

        with self.name_scope():
            self.body.add(
                Conv2D(channels=channels, kernel_size=(1, 1), use_bias=False))
            self.body.add(BatchNorm(momentum=bn_mom))
            self.body.add(get_act(act_type))
            self.body.add(Flatten())
            self.body.add(Dense(units=fc0))
            self.body.add(get_act(act_type))
            self.body.add(Dense(units=1))
            self.body.add(get_act("tanh"))
Example #16
def gluon_model(model_data):
    train_data, train_label, _ = model_data
    train_data_loader = DataLoader(list(zip(train_data, train_label)),
                                   batch_size=128,
                                   last_batch="discard")
    model = HybridSequential()
    model.add(Dense(128, activation="relu"))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()
    trainer = Trainer(model.collect_params(),
                      "adam",
                      optimizer_params={
                          "learning_rate": 0.001,
                          "epsilon": 1e-07
                      })

    # `metrics` was renamed in mxnet 1.6.0: https://github.com/apache/incubator-mxnet/pull/17048
    arg_name = ("metrics"
                if LooseVersion(mx.__version__) < LooseVersion("1.6.0") else
                "train_metrics")
    est = estimator.Estimator(net=model,
                              loss=SoftmaxCrossEntropyLoss(),
                              trainer=trainer,
                              **{arg_name: Accuracy()})
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(train_data_loader, epochs=3)
    return model
Example #17
    def __init__(self, channels, bn_mom, act_type, unit_name, use_se=True, res_scale_fac=0.2):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used as a description string)
        :param use_se: Whether to append a squeeze-excitation module
        :param res_scale_fac: Constant multiplicative scalar applied to the residual activation maps
        """
        super(_RiseResidualBlock, self).__init__(unit_name)
        self.act_type = act_type
        self.unit_name = unit_name
        self.res_scale_fac = res_scale_fac

        self.use_se = use_se

        # branch 0
        self.body = HybridSequential()
        self.body.add(Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False,
                             prefix='%s_conv0' % unit_name))
        self.body.add(BatchNorm(momentum=bn_mom, prefix='%s_bn0' % self.unit_name))
        self.body.add(get_act(act_type, prefix='%s_%s0' % (unit_name, act_type)))

        self.body.add(Conv2D(channels=channels, kernel_size=(3, 3), padding=(1, 1), use_bias=False,
                             prefix='%s_conv1' % unit_name))
        self.body.add(BatchNorm(momentum=bn_mom, prefix='%s_bn1' % self.unit_name))

        self.act0 = get_act(act_type, prefix='%s_%s1' % (unit_name, act_type))

        if use_se:
            self.se0 = _SqueezeExcitation('%s_se0' % unit_name, channels, 16, act_type)
Example #18
def test_autolog_registering_model():
    registered_model_name = "test_autolog_registered_model"
    mlflow.gluon.autolog(registered_model_name=registered_model_name)

    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")

    model = HybridSequential()
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()

    trainer = Trainer(model.collect_params(),
                      "adam",
                      optimizer_params={
                          "learning_rate": 0.001,
                          "epsilon": 1e-07
                      })
    est = get_estimator(model, trainer)

    with mlflow.start_run(), warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(data, epochs=3)

        registered_model = MlflowClient().get_registered_model(
            registered_model_name)
        assert registered_model.name == registered_model_name
Example #19
def gluon_random_data_run():
    mlflow.gluon.autolog()

    with mlflow.start_run() as run:
        data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
        validation = DataLoader(LogsDataset(),
                                batch_size=128,
                                last_batch="discard")

        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(model.collect_params(),
                          "adam",
                          optimizer_params={
                              "learning_rate": .001,
                              "epsilon": 1e-07
                          })
        est = estimator.Estimator(net=model,
                                  loss=SoftmaxCrossEntropyLoss(),
                                  metrics=Accuracy(),
                                  trainer=trainer)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3, val_data=validation)

    return client.get_run(run.info.run_id)
Example #20
    def __init__(self,
                 inner_channels,
                 outer_channels,
                 inner_block=None,
                 innermost=False,
                 outermost=False,
                 use_dropout=False,
                 use_bias=False,
                 final_out=3):
        super(UnetSkipUnit, self).__init__()

        with self.name_scope():
            self.outermost = outermost
            en_conv = Conv2D(channels=inner_channels,
                             kernel_size=4,
                             strides=2,
                             padding=1,
                             in_channels=outer_channels,
                             use_bias=use_bias)
            en_relu = LeakyReLU(alpha=0.2)
            en_norm = BatchNorm(momentum=0.1, in_channels=inner_channels)
            de_relu = Activation(activation='relu')
            de_norm = BatchNorm(momentum=0.1, in_channels=outer_channels)

            if innermost:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + decoder
            elif outermost:
                de_conv = Conv2DTranspose(channels=final_out,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2)
                encoder = [en_conv]
                decoder = [de_relu, de_conv, Activation(activation='tanh')]
                model = encoder + [inner_block] + decoder
            else:
                de_conv = Conv2DTranspose(channels=outer_channels,
                                          kernel_size=4,
                                          strides=2,
                                          padding=1,
                                          in_channels=inner_channels * 2,
                                          use_bias=use_bias)
                encoder = [en_relu, en_conv, en_norm]
                decoder = [de_relu, de_conv, de_norm]
                model = encoder + [inner_block] + decoder
            if use_dropout:
                model += [Dropout(rate=0.5)]

            self.model = HybridSequential()
            with self.model.name_scope():
                for block in model:
                    self.model.add(block)
Example #21
 def __init__(self, opts):
     super(BasicBlock, self).__init__()
     self.bblock = HybridSequential()
     if opts.bottle_neck:
         self.bblock.add(
             BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         if not opts.trans_block:
             self.bblock.add(LeakyReLU(alpha=.2))
         else:
             self.bblock.add(Activation(opts.activation))
         self.bblock.add(
             Conv2D(channels=int(opts.growth_rate * 4),
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    use_bias=opts.use_bias,
                    padding=(0, 0)))
         if opts.drop_out > 0:
             self.bblock.add(Dropout(opts.drop_out))
     self.bblock.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
     self.bblock.add(Activation(activation=opts.activation))
     self.bblock.add(
         Conv2D(channels=int(opts.growth_rate),
                kernel_size=(3, 3),
                strides=(1, 1),
                use_bias=opts.use_bias,
                padding=(1, 1)))
     if opts.drop_out > 0:
         self.bblock.add(Dropout(opts.drop_out))
Example #22
    def __init__(self, opts):
        super(DenseMultipathNet, self).__init__()
        opts.units = opts.units[:opts.num_stage]
        assert (len(opts.units) == opts.num_stage)

        num_filters = opts.init_channels
        num_filters_list = []
        for stage in range(opts.num_stage):
            num_filters += opts.units[stage] * opts.growth_rate
            num_filters = int(floor(num_filters * opts.reduction))
            num_filters_list.append(num_filters)

        self.net = HybridSequential()
        with self.net.name_scope():
            self.blocks = EncoderDecoderUnit(opts, num_filters_list[opts.num_stage-1], opts.num_stage-1, innermost=True)
            for stage in range(opts.num_stage-2, -1, -1):
                self.blocks = EncoderDecoderUnit(opts, num_filters_list[stage], stage, inner_block=self.blocks)
            self.net.add(FirstBlock(opts))
            self.net.add(self.blocks)
            self.net.add(ResDBlock(opts, num_filters=16))
            if opts.norm_type == 'batch':
                self.net.add(NormLayer())
            elif opts.norm_type == 'group':
                self.net.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.net.add(InstanceNorm())

            if opts.activation in ['leaky']:
                self.net.add(LeakyReLU(opts.alpha))
            else:
                self.net.add(Activation(opts.activation))
            self.net.add(Conv3D(kernel_size=(1, 1, 1), channels=2, use_bias=opts.use_bias))
            self.net.add(Softmax())
Example #23
    def __init__(self, opts):
        super(BasicBlock, self).__init__()
        self.bblock = HybridSequential()
        if opts.bottle_neck:
            if opts.norm_type == 'batch':
                self.bblock.add(NormLayer())
            elif opts.norm_type == 'group':
                self.bblock.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.bblock.add(InstanceNorm())
            if opts.activation in ['leaky']:
                self.bblock.add(LeakyReLU(alpha=opts.alpha))
            else:
                self.bblock.add(Activation(opts.activation))
            self.bblock.add(Conv3D(channels=int(opts.growth_rate * 4), kernel_size=(opts.zKernelSize, 1, 1),
                              strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 0, 0)))
            if opts.drop_out > 0:
                self.bblock.add(Dropout(opts.drop_out))
        if opts.norm_type == 'batch':
            self.bblock.add(NormLayer())
        elif opts.norm_type == 'group':
            self.bblock.add(GroupNorm(in_channels=int(opts.growth_rate * 4)))
        elif opts.norm_type == 'instance':
            self.bblock.add(InstanceNorm())

        if opts.activation in ['leaky']:
            self.bblock.add(LeakyReLU(opts.alpha))
        else:
            self.bblock.add(Activation(opts.activation))
        self.bblock.add(Conv3D(channels=int(opts.growth_rate), kernel_size=(opts.zKernelSize, 3, 3),
                          strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 1, 1)))
        if opts.drop_out > 0:
            self.bblock.add(Dropout(opts.drop_out))
Example #24
    def __init__(self, in_channels, ndf=64, n_layers=3, use_sigmoid=False, use_bias=False):
        super(Discriminator, self).__init__()

        with self.name_scope():
            self.model = HybridSequential()
            kernel_size = 4
            padding = int(np.ceil((kernel_size - 1)/2))
            self.model.add(Conv2D(channels=ndf, kernel_size=kernel_size, strides=2,
                                  padding=padding, in_channels=in_channels))
            self.model.add(LeakyReLU(alpha=0.2))

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult_prev = nf_mult
                nf_mult = min(2 ** n, 8)
                self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=2,
                                      padding=padding, in_channels=ndf * nf_mult_prev,
                                      use_bias=use_bias))
                self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
                self.model.add(LeakyReLU(alpha=0.2))

            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n_layers, 8)
            self.model.add(Conv2D(channels=ndf * nf_mult, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult_prev,
                                  use_bias=use_bias))
            self.model.add(BatchNorm(momentum=0.1, in_channels=ndf * nf_mult))
            self.model.add(LeakyReLU(alpha=0.2))
            self.model.add(Conv2D(channels=1, kernel_size=kernel_size, strides=1,
                                  padding=padding, in_channels=ndf * nf_mult))
            if use_sigmoid:
                self.model.add(Activation(activation='sigmoid'))
Example #25
def test_autolog_persists_manually_created_run():
    mlflow.gluon.autolog()

    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")

    with mlflow.start_run() as run:

        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(
            model.collect_params(),
            "adam",
            optimizer_params={
                "learning_rate": 0.001,
                "epsilon": 1e-07
            },
        )
        est = get_estimator(model, trainer)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3)

        assert mlflow.active_run().info.run_id == run.info.run_id
Example #26
    def __init__(self, channels, bn_mom, act_type, unit_name):
        """

        :param channels: Number of channels used in the conv-operations
        :param bn_mom: Batch normalization momentum
        :param act_type: Activation function to use
        :param unit_name: Unit name of the residual block (only used as a description string)
        """
        super(ResidualBlock, self).__init__()
        self.act_type = act_type
        self.unit_name = unit_name

        self.body = HybridSequential()

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv0" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn0" % self.unit_name))
        self.body.add(
            Activation(self.act_type,
                       prefix="%s_%s0" % (self.unit_name, self.act_type)))

        self.body.add(
            Conv2D(channels=channels,
                   kernel_size=(3, 3),
                   padding=(1, 1),
                   use_bias=False,
                   prefix="%s_conv1" % unit_name))
        self.body.add(
            BatchNorm(momentum=bn_mom, prefix="%s_bn1" % self.unit_name))
Example #27
def get_gluon_random_data_run(log_models=True):
    mlflow.gluon.autolog(log_models)

    with mlflow.start_run() as run:
        data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
        validation = DataLoader(LogsDataset(),
                                batch_size=128,
                                last_batch="discard")

        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(
            model.collect_params(),
            "adam",
            optimizer_params={
                "learning_rate": 0.001,
                "epsilon": 1e-07
            },
        )
        est = get_estimator(model, trainer)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3, val_data=validation)
    client = mlflow.tracking.MlflowClient()
    return client.get_run(run.info.run_id)
Example #28
 def __init__(self, channels, strides, in_channels=0):
     super(Bottleneck, self).__init__()
     self.body = HybridSequential(prefix="")
     self.body.add(
         nn.Conv2D(channels=channels // 4, kernel_size=1, strides=1))
     self.body.add(nn.BatchNorm())
     self.body.add(nn.Activation('relu'))
     self.body.add(
         nn.Conv2D(channels=channels // 4,
                   kernel_size=3,
                   strides=strides,
                   padding=1,
                   use_bias=False,
                   in_channels=channels // 4))
     self.body.add(nn.BatchNorm())
     self.body.add(nn.Activation('relu'))
     self.body.add(nn.Conv2D(channels, kernel_size=1, strides=1))
     self.body.add(nn.BatchNorm())
     self.downsample = nn.HybridSequential()
     self.downsample.add(
         nn.Conv2D(channels=channels,
                   kernel_size=1,
                   strides=strides,
                   use_bias=False,
                   in_channels=in_channels))
     self.downsample.add(nn.BatchNorm())
Example #29
    def __init__(self, outer_channels, use_bias=False):
        super(Res_Block, self).__init__()
        with self.name_scope():
            conv1 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            relu1 = LeakyReLU(alpha=0.2)
            norm1 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            conv2 = Conv2D(channels=outer_channels,
                           kernel_size=3,
                           strides=1,
                           padding=1,
                           in_channels=outer_channels,
                           use_bias=use_bias)
            norm2 = BatchNorm(momentum=0.1, in_channels=outer_channels)
            relu2 = LeakyReLU(alpha=0.2)

            res_block = [conv1, norm1, relu1, conv2, norm2, relu2]
            self.res = HybridSequential()
            with self.res.name_scope():
                for block in res_block:
                    self.res.add(block)
Example #30
 def __init__(self, num_experts):
     super(DefaultRouter, self).__init__()
     with self.name_scope():
         self.body = HybridSequential(prefix='')
         self.body.add(GlobalAvgPool2D())
         # self.body.add(Dense(num_experts//4, activation='relu'))
         self.body.add(Dense(num_experts, activation='sigmoid'))
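
Because the gate is a sigmoid rather than a softmax, every expert receives an independent weight in (0, 1). A hypothetical usage sketch (the experts list, the router instance and the 4-D expert outputs are assumptions, not from the source):

    gates = router(x)  # shape (batch, num_experts)
    out = sum(gates.slice_axis(axis=1, begin=i, end=i + 1).reshape(-1, 1, 1, 1)
              * expert(x)
              for i, expert in enumerate(experts))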