def __init__(self, opts):
        super(BasicBlock, self).__init__()
        self.bblock = HybridSequential()
        if opts.bottle_neck:
            if opts.norm_type == 'batch':
                self.bblock.add(NormLayer())
            elif opts.norm_type == 'group':
                self.bblock.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.bblock.add(InstanceNorm())
            if opts.activation == 'leaky':
                self.bblock.add(LeakyReLU(alpha=opts.alpha))
            else:
                self.bblock.add(Activation(opts.activation))
            self.bblock.add(Conv3D(channels=int(opts.growth_rate * 4), kernel_size=(opts.zKernelSize, 1, 1),
                              strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 0, 0)))
            if opts.drop_out > 0:
                self.bblock.add(Dropout(opts.drop_out))
        if opts.norm_type == 'batch':
            self.bblock.add(NormLayer())
        elif opts.norm_type == 'group':
            self.bblock.add(GroupNorm(in_channels=int(opts.growth_rate * 4)))
        elif opts.norm_type == 'instance':
            self.bblock.add(InstanceNorm())

        if opts.activation == 'leaky':
            self.bblock.add(LeakyReLU(opts.alpha))
        else:
            self.bblock.add(Activation(opts.activation))
        self.bblock.add(Conv3D(channels=int(opts.growth_rate), kernel_size=(opts.zKernelSize, 3, 3),
                          strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 1, 1)))
        if opts.drop_out > 0:
            self.bblock.add(Dropout(opts.drop_out))
    def __init__(self, opts):
        super(DenseMultipathNet, self).__init__()
        opts.units = opts.units[:opts.num_stage]
        assert (len(opts.units) == opts.num_stage)

        num_filters = opts.init_channels
        num_filters_list = []
        for stage in range(opts.num_stage):
            num_filters += opts.units[stage] * opts.growth_rate
            num_filters = int(floor(num_filters * opts.reduction))
            num_filters_list.append(num_filters)

        self.net = HybridSequential()
        with self.net.name_scope():
            self.blocks = EncoderDecoderUnit(opts, num_filters_list[opts.num_stage-1], opts.num_stage-1, innermost=True)
            for stage in range(opts.num_stage-2, -1, -1):
                self.blocks = EncoderDecoderUnit(opts, num_filters_list[stage], stage, inner_block=self.blocks)
            self.net.add(FirstBlock(opts))
            self.net.add(self.blocks)
            self.net.add(ResDBlock(opts, num_filters=16))
            if opts.norm_type == 'batch':
                self.net.add(NormLayer())
            elif opts.norm_type == 'group':
                self.net.add(GroupNorm())
            elif opts.norm_type == 'instance':
                self.net.add(InstanceNorm())

            if opts.activation == 'leaky':
                self.net.add(LeakyReLU(opts.alpha))
            else:
                self.net.add(Activation(opts.activation))
            self.net.add(Conv3D(kernel_size=(1, 1, 1), channels=2, use_bias=opts.use_bias))
            self.net.add(Softmax())
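A quick check of the filter bookkeeping in the loop above, with illustrative values (not taken from the snippet):

# With init_channels=16, growth_rate=12, units=[4, 4], reduction=0.5:
#   stage 0: 16 + 4 * 12 = 64 -> int(floor(64 * 0.5)) = 32
#   stage 1: 32 + 4 * 12 = 80 -> int(floor(80 * 0.5)) = 40
# So num_filters_list == [32, 40], and the innermost EncoderDecoderUnit
# receives num_filters_list[num_stage - 1] == 40.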
Example #3
def conv_block(channels, num_convs=2, use_bias=False, use_global_stats=False, **kwargs):
    """Define U-Net convolution block"""
    out = HybridSequential(prefix="")
    with out.name_scope():
        for _ in range(num_convs):
            out.add(Conv3D(channels=channels, kernel_size=3, padding=1, use_bias=use_bias))
            out.add(Activation('relu'))
            out.add(BatchNorm(use_global_stats=use_global_stats))  # BN after ReLU seems to be the more recommended ordering.
    return out
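A minimal smoke test for conv_block, assuming mxnet is installed; the input shape is illustrative:

import mxnet as mx

block = conv_block(channels=8)
block.initialize()
x = mx.nd.random.uniform(shape=(1, 1, 8, 32, 32))  # (N, C, D, H, W)
print(block(x).shape)  # (1, 8, 8, 32, 32): kernel 3 with padding 1 preserves D, H, W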
Example #4
 def __init__(self, opts):
     super(FirstBlock, self).__init__()
     self.fblock = HybridSequential()
     self.fblock.add(
         Conv3D(channels=opts.init_channels,
                kernel_size=(opts.zKernelSize, 3, 3),
                strides=(opts.zStride, 1, 1),
                padding=(opts.zPad, 1, 1),
                use_bias=opts.use_bias))
 def __init__(self, opts, num_filters, pool_type='avg'):
     super(TransitionBlock, self).__init__()
     self.pool_type = pool_type
     self.tblock = HybridSequential()
     if opts.norm_type == 'batch':
         self.tblock.add(NormLayer())
     elif opts.norm_type == 'group':
         self.tblock.add(GroupNorm())
     elif opts.norm_type == 'instance':
         self.tblock.add(InstanceNorm())
     if opts.activation == 'leaky':
         self.tblock.add(LeakyReLU(opts.alpha))
     else:
         self.tblock.add(Activation(opts.activation))
     self.tblock.add(Conv3D(channels=int(num_filters * opts.reduction), kernel_size=(opts.zKernelSize, 1, 1),
                       strides=(opts.zStride, 1, 1), use_bias=opts.use_bias, padding=(opts.zPad, 0, 0)))
     if opts.drop_out > 0:
         self.tblock.add(Dropout(opts.drop_out))
def conv_factory(opts, num_filters, kernel_size, stride=1, group=1):
    """A convenience function for convolution with batchnorm & activation"""
    pad = int((kernel_size - 1) / 2)
    out = HybridSequential()
    if opts.norm_type == 'batch':
        out.add(NormLayer())
    elif opts.norm_type == 'group':
        out.add(GroupNorm())
    elif opts.norm_type == 'instance':
        out.add(InstanceNorm())

    if opts.activation == 'leaky':
        out.add(LeakyReLU(opts.alpha))
    else:
        out.add(Activation(opts.activation))

    out.add(Conv3D(channels=num_filters, kernel_size=(opts.zKernelSize, kernel_size, kernel_size),
                   strides=(opts.zStride, 1, 1), use_bias=opts.use_bias,
                   padding=(opts.zPad, pad, pad), groups=group))
    return out
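Hypothetical usage of conv_factory; opts here is a stand-in namespace carrying only the fields the function reads, not the project's real options object:

from types import SimpleNamespace

opts = SimpleNamespace(norm_type='batch', activation='relu', alpha=0.1,
                       zKernelSize=3, zStride=1, zPad=1, use_bias=False)
out = conv_factory(opts, num_filters=32, kernel_size=5)  # pad = (5 - 1) // 2 = 2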
Example #7
 def build_notch(self, opts):
     """Summarize multiple branches"""
     net = HybridSequential()
     with net.name_scope():
         if opts.norm_type == 'batch':
             net.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         elif opts.norm_type == 'instance':
             net.add(InstanceNorm())
         if opts.activation == 'leaky':
             net.add(LeakyReLU(opts.alpha))
         else:
             net.add(Activation(opts.activation))
         net.add(
             Conv3D(kernel_size=(1, 1, 1),
                    channels=2,
                    use_bias=opts.use_bias))
         if opts.norm_type == 'batch':
             net.add(BatchNorm(momentum=opts.bn_mom, epsilon=opts.bn_eps))
         elif opts.norm_type == 'instance':
             net.add(InstanceNorm())
             net.add(InstanceNorm())
         net.add(Softmax())
     return net
Example #8
 def __init__(self, inner_channels, outer_channels, inner_block=None, innermost=False, outermost=False, use_dropout=False, use_bias=False, **kwargs):
     super(UnetSkipUnit, self).__init__()
     
     with self.name_scope():
         self.outermost = outermost
         
         downsample = MaxPool3D(pool_size=2, strides=2)
         upsample = Conv3DTranspose(channels=outer_channels, kernel_size=2, padding=0, strides=2, use_bias=use_bias)
         head = Conv3D(channels=outer_channels, kernel_size=1)
         
         self.model = HybridSequential()
         if not outermost:
             self.model.add(downsample)
         self.model.add(conv_block(inner_channels, use_bias=use_bias, **kwargs))
         if not innermost:
             self.model.add(inner_block)
             self.model.add(conv_block(inner_channels, use_bias=use_bias, **kwargs))
         if not outermost:
             self.model.add(upsample)
         if outermost:
             if use_dropout:
                 self.model.add(Dropout(rate=0.1))
             self.model.add(head)
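The units nest recursively, innermost first, much like the EncoderDecoderUnit loop in the first snippet; a hypothetical 3-level stack (channel counts are illustrative):

unit = UnetSkipUnit(inner_channels=256, outer_channels=128, innermost=True)
unit = UnetSkipUnit(inner_channels=128, outer_channels=64, inner_block=unit)
unit = UnetSkipUnit(inner_channels=64, outer_channels=2, inner_block=unit, outermost=True)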
    def __init__(self,
                 computable_loss,
                 initializer=None,
                 learning_rate=1e-05,
                 learning_attenuate_rate=1.0,
                 attenuate_epoch=50,
                 ctx=mx.gpu(),
                 hybridize_flag=True,
                 activation="relu6",
                 filter_multiplier=1.0,
                 input_filter_n=32,
                 input_kernel_size=(1, 3, 3),
                 input_strides=(1, 2, 2),
                 input_padding=(1, 1, 1),
                 bottleneck_dict_list=[
                     {
                         "filter_rate": 1,
                         "filter_n": 16,
                         "block_n": 1,
                         "stride": (1, 1, 1)
                     },
                     {
                         "filter_rate": 6,
                         "filter_n": 24,
                         "block_n": 2,
                         "stride": (1, 2, 2)
                     },
                     {
                         "filter_rate": 6,
                         "filter_n": 32,
                         "block_n": 3,
                         "stride": (1, 2, 2)
                     },
                     {
                         "filter_rate": 6,
                         "filter_n": 64,
                         "block_n": 4,
                         "stride": (1, 2, 2)
                     },
                     {
                         "filter_rate": 6,
                         "filter_n": 96,
                         "block_n": 3,
                         "stride": (1, 1, 1)
                     },
                     {
                         "filter_rate": 6,
                         "filter_n": 160,
                         "block_n": 3,
                         "stride": (1, 2, 2)
                     },
                     {
                         "filter_rate": 6,
                         "filter_n": 320,
                         "block_n": 1,
                         "stride": (1, 1, 1)
                     },
                 ],
                 hidden_filter_n=1280,
                 pool_size=(1, 7, 7),
                 output_nn=None,
                 optimizer_name="SGD",
                 shortcut_flag=True,
                 global_shortcut_flag=False,
                 output_batch_norm_flag=True,
                 scale=1.0,
                 init_deferred_flag=None,
                 **kwargs):
        '''
        Init.

        Args:
            computable_loss:            is-a `ComputableLoss` or `mxnet.gluon.loss`.
            initializer:                is-a `mxnet.initializer.Initializer` for parameters of the model. If `None`, parameters are drawn from the Xavier distribution.
            learning_rate:              `float` of learning rate.
            learning_attenuate_rate:    `float` factor by which the `learning_rate` is attenuated every `attenuate_epoch`.
            attenuate_epoch:            `int` interval (in epochs) at which the `learning_rate` is attenuated by `learning_attenuate_rate`.

            ctx:                        `mx.cpu()` or `mx.gpu()`.
            hybridize_flag:             `bool` flag whether this class will hybridize models or not.
            activation:                 `str` of activation function.
                                        - `relu`: ReLU function.
                                        - `relu6`: ReLU6 function.
                                        - `identity`: Identity function.
                                        - `identity_adjusted`: Identity function followed by normalization (division by the sum).

            filter_multiplier:          `float` multiplier to compress the size of the model.
            input_filter_n:             `int` of the number of filters in the input layer.
            input_kernel_size:          `tuple` or `int` of kernel size.
            input_strides:              `tuple` or `int` of strides.
            input_padding:              `tuple` or `int` of zero-padding.
            bottleneck_dict_list:       `list` of information on bottleneck layers whose `dict` means ...
                                        - `filter_rate`: `float` expansion rate for the number of filters.
                                        - `filter_n`: `int` of the number of filters.
                                        - `block_n`: `int` of the number of blocks.
                                        - `stride`: `int` or `tuple` of strides.

            hidden_filter_n:            `int` of the number of filters in hidden layers.
            pool_size:                  `tuple` or `int` of pooling size in the hidden layer.
                                        If `None`, no pooling layer is attached to the hidden layer.

            output_nn:                  output layers as a Gluon block, or `None`.
            optimizer_name:             `str` of name of optimizer.

            shortcut_flag:              `bool` flag that means shortcuts will be added into residual blocks or not.
            global_shortcut_flag:       `bool` flag that means a global shortcut, which propagates the input data to the output layer, will be added or not.
            output_batch_norm_flag:     `bool` flag that means batch normalization will be added after the output convolution or not.
            scale:                      `float` of scaling factor for initial parameters.
            init_deferred_flag:         `bool` that means initialization in this class will be deferred or not.

        '''

        if init_deferred_flag is None:
            init_deferred_flag = self.init_deferred_flag
        elif isinstance(init_deferred_flag, bool) is False:
            raise TypeError("The type of `init_deferred_flag` must be `bool`.")

        self.init_deferred_flag = True

        super().__init__(computable_loss=computable_loss,
                         initializer=initializer,
                         learning_rate=learning_rate,
                         learning_attenuate_rate=learning_attenuate_rate,
                         attenuate_epoch=attenuate_epoch,
                         output_nn=output_nn,
                         optimizer_name=optimizer_name,
                         ctx=ctx,
                         hybridize_flag=hybridize_flag,
                         scale=scale,
                         not_init_flag=True,
                         hidden_units_list=[],
                         hidden_dropout_rate_list=[],
                         hidden_batch_norm_list=[],
                         hidden_activation_list=[],
                         **kwargs)
        self.init_deferred_flag = init_deferred_flag

        input_filter_n = int(round(input_filter_n * filter_multiplier))
        hidden_filter_n = int(hidden_filter_n * filter_multiplier) if filter_multiplier > 1.0 else hidden_filter_n

        if activation == "relu6" or activation == "identity_adjusted" or activation == "identity":
            batch_norm_scale = True
        else:
            batch_norm_scale = False

        self.__input_layers_list = [
            Conv3D(
                channels=input_filter_n,
                kernel_size=input_kernel_size,
                strides=input_strides,
                padding=input_padding,
                use_bias=False,
            ),
            BatchNorm(axis=1,
                      epsilon=1e-05,
                      center=True,
                      scale=batch_norm_scale),
        ]
        if activation == "relu6":
            self.__input_layers_list.append(ReLuN(min_n=0, max_n=6))
        elif activation == "relu":
            self.__input_layers_list.append(ReLuN(min_n=0, max_n=-1))

        inverted_residual_blocks_list = [None] * len(bottleneck_dict_list)
        in_filter_n = input_filter_n

        for i in range(len(bottleneck_dict_list)):
            inverted_residual_blocks_list[i] = []
            blocks_list = []

            filter_expfilter_n = int(
                round(in_filter_n * bottleneck_dict_list[i]["filter_rate"]))

            for j in range(bottleneck_dict_list[i]["block_n"]):
                channel_expand_conv = Conv3D(
                    channels=filter_expfilter_n,
                    kernel_size=(1, 1, 1),
                    strides=(1, 1, 1),
                    padding=(0, 0, 0),
                    groups=1,
                    use_bias=False,
                )
                if j == 0:
                    strides = bottleneck_dict_list[i]["stride"]
                else:
                    strides = (1, 1, 1)

                # 3-D kernels require Conv3D; padding (0, 1, 1) keeps every dim
                # unchanged under a (1, 3, 3) kernel, so shortcut shapes match.
                bottleneck_conv = Conv3D(
                    channels=filter_expfilter_n,
                    kernel_size=(1, 3, 3),
                    strides=strides,
                    padding=(0, 1, 1),
                    groups=1,
                    use_bias=False,
                )
                linear_conv = Conv3D(
                    channels=bottleneck_dict_list[i]["filter_n"],
                    kernel_size=(1, 1, 1),
                    strides=(1, 1, 1),
                    padding=(0, 0, 0),
                    groups=1,
                    use_bias=False,
                )
                blocks_list.append(channel_expand_conv)
                blocks_list.append(
                    BatchNorm(axis=1,
                              epsilon=1e-05,
                              center=True,
                              scale=batch_norm_scale), )
                if activation == "relu6":
                    blocks_list.append(ReLuN(min_n=0, max_n=6))
                elif activation == "relu":
                    blocks_list.append(ReLuN(min_n=0, max_n=-1))

                blocks_list.append(bottleneck_conv)
                blocks_list.append(
                    BatchNorm(axis=1,
                              epsilon=1e-05,
                              center=True,
                              scale=batch_norm_scale), )
                if activation == "relu6":
                    blocks_list.append(ReLuN(min_n=0, max_n=6))
                elif activation == "relu":
                    blocks_list.append(ReLuN(min_n=0, max_n=-1))

                blocks_list.append(linear_conv)
                blocks_list.append(
                    BatchNorm(axis=1, epsilon=1e-05, center=True,
                              scale=True), )

            inverted_residual_blocks_list[i].append(blocks_list)

            in_filter_n = int(
                round(bottleneck_dict_list[i]["filter_n"] * filter_multiplier))

        self.__inverted_residual_blocks_list = inverted_residual_blocks_list

        self.__output_layers_list = [
            Conv3D(
                channels=hidden_filter_n,
                kernel_size=(1, 1, 1),
                strides=(1, 1, 1),
                padding=(0, 0, 0),
                use_bias=False,
            )
        ]

        if output_batch_norm_flag is True:
            self.__output_layers_list.append(
                BatchNorm(axis=1,
                          epsilon=1e-05,
                          center=True,
                          scale=batch_norm_scale))

        if activation == "relu6":
            self.__output_layers_list.append(ReLuN(min_n=0, max_n=6))
        elif activation == "relu":
            self.__output_layers_list.append(ReLuN(min_n=0, max_n=-1))

        if pool_size is not None:
            self.__output_layers_list.append(
                AvgPool3D(pool_size=pool_size))

        self.__shortcut_flag = shortcut_flag

        if initializer is None:
            if activation == "relu" or activation == "relu6":
                magnitude = 2
            else:
                magnitude = 1

            self.initializer = mx.initializer.Xavier(rnd_type="gaussian",
                                                     factor_type="in",
                                                     magnitude=magnitude)
        else:
            if isinstance(initializer, mx.initializer.Initializer) is False:
                raise TypeError(
                    "The type of `initializer` must be `mxnet.initializer.Initializer`."
                )
            self.initializer = initializer

        with self.name_scope():
            for i in range(len(self.__input_layers_list)):
                self.register_child(self.__input_layers_list[i])

            for i in range(len(self.__inverted_residual_blocks_list)):
                for j in range(len(self.__inverted_residual_blocks_list[i])):
                    for k in range(
                            len(self.__inverted_residual_blocks_list[i][j])):
                        self.register_child(
                            self.__inverted_residual_blocks_list[i][j][k])

            for i in range(len(self.__output_layers_list)):
                self.register_child(self.__output_layers_list[i])

            if output_nn is not None:
                self.output_nn = output_nn

        self.__global_shortcut_flag = global_shortcut_flag

        logger = getLogger("accelbrainbase")
        self.__logger = logger

        if self.init_deferred_flag is False:
            try:
                self.collect_params().initialize(self.initializer,
                                                 force_reinit=True,
                                                 ctx=ctx)
                params_dict = {"learning_rate": learning_rate}

                self.trainer = gluon.Trainer(self.collect_params(),
                                             optimizer_name, params_dict)

                if hybridize_flag is True:
                    self.hybridize()
                    if self.output_nn is not None:
                        self.output_nn.hybridize()

            except InitDeferredError:
                self.__logger.debug("The initialization should be deferred.")

        self.__activation = activation
        self.__hybridize_flag = hybridize_flag
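For reference, the expand -> spatial conv -> linear projection pattern that the loop above assembles, condensed into a standalone sketch; ReLuN is library-specific, so a plain Activation('relu') stands in, and all names here are illustrative:

from mxnet.gluon.nn import HybridSequential, Conv3D, BatchNorm, Activation

def inverted_residual_sketch(in_ch, out_ch, filter_rate=6, strides=(1, 1, 1)):
    mid = int(round(in_ch * filter_rate))
    block = HybridSequential()
    block.add(Conv3D(channels=mid, kernel_size=(1, 1, 1), use_bias=False))     # expand
    block.add(BatchNorm(axis=1), Activation('relu'))
    block.add(Conv3D(channels=mid, kernel_size=(1, 3, 3), strides=strides,
                     padding=(0, 1, 1), use_bias=False))                       # spatial
    block.add(BatchNorm(axis=1), Activation('relu'))
    block.add(Conv3D(channels=out_ch, kernel_size=(1, 1, 1), use_bias=False))  # linear projection
    block.add(BatchNorm(axis=1))
    return block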