Example #1
    def __init__(self,
                 nfilters,
                 nheads=1,
                 _norm_type='BatchNorm',
                 norm_groups=None,
                 ftdepth=5,
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        with self.name_scope():

            self.conv1 = Conv2DNormed(channels=nfilters,
                                      kernel_size=3,
                                      padding=1,
                                      groups=nheads,
                                      _norm_type=_norm_type,
                                      norm_groups=norm_groups,
                                      **kwards)  # restore help
            self.conv3 = Fusion(nfilters=nfilters,
                                kernel_size=3,
                                padding=1,
                                nheads=nheads,
                                norm=_norm_type,
                                norm_groups=norm_groups,
                                ftdepth=ftdepth,
                                **kwards)  # process
Example #2
    def __init__(self, _nfilters, _norm_type='BatchNorm', **kwards):
        HybridBlock.__init__(self, **kwards)

        self.nfilters = _nfilters

        # This is used as a container (list) of layers
        self.convs = gluon.nn.HybridSequential()
        with self.name_scope():

            self.convs.add(
                Conv2DNormed(self.nfilters // 4,
                             kernel_size=(1, 1),
                             padding=(0, 0),
                             prefix="_conv1_"))
            self.convs.add(
                Conv2DNormed(self.nfilters // 4,
                             kernel_size=(1, 1),
                             padding=(0, 0),
                             prefix="_conv2_"))
            self.convs.add(
                Conv2DNormed(self.nfilters // 4,
                             kernel_size=(1, 1),
                             padding=(0, 0),
                             prefix="_conv3_"))
            self.convs.add(
                Conv2DNormed(self.nfilters // 4,
                             kernel_size=(1, 1),
                             padding=(0, 0),
                             prefix="_conv4_"))

        self.conv_norm_final = Conv2DNormed(channels=self.nfilters,
                                            kernel_size=(1, 1),
                                            padding=(0, 0),
                                            _norm_type=_norm_type)
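Each branch above emits only nfilters // 4 channels, which only pays off once a forward pass concatenates the four results back to the full width before the final fused convolution. A minimal sketch of such a hybrid_forward (an assumption; the source shows only the constructor, and any multi-scale pooling/upsampling is omitted here):

    def hybrid_forward(self, F, x):
        # each branch yields (N, nfilters // 4, H, W); concatenating the four
        # restores the full channel count for the final normed 1x1 convolution
        outs = [conv(x) for conv in self.convs]
        return self.conv_norm_final(F.concat(*outs, dim=1))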
Example #3
    def __init__(self, channels, kernel_size, strides=(1, 1),
                 padding=(0, 0), dilation=(1, 1), activation=None,
                 weight_initializer=None, in_channels=0,
                 _norm_type='BatchNorm', axis=1, **kwards):
        HybridBlock.__init__(self, **kwards)

        if (_norm_type == 'BatchNorm'):
            self.norm = gluon.nn.BatchNorm
        elif (_norm_type == 'SyncBatchNorm'):
            self.norm = gluon.contrib.nn.SyncBatchNorm
        elif (_norm_type == 'InstanceNorm'):
            self.norm = gluon.nn.InstanceNorm
        elif (_norm_type == 'LayerNorm'):
            self.norm = gluon.nn.LayerNorm
        else:
            raise NotImplementedError

        with self.name_scope():
            self.conv2d = gluon.nn.Conv2D(channels, kernel_size=kernel_size,
                                          strides=strides,
                                          padding=padding,
                                          dilation=dilation,
                                          activation=activation,
                                          use_bias=False,
                                          weight_initializer=weight_initializer,
                                          in_channels=in_channels)

            # SyncBatchNorm is fixed to the channel axis and does not accept
            # an `axis` argument, so only pass `axis` to the other norms.
            if self.norm is gluon.contrib.nn.SyncBatchNorm:
                self.norm_layer = self.norm()
            else:
                self.norm_layer = self.norm(axis=axis)
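For orientation, the forward pass this constructor implies is simply convolution followed by normalization. A minimal sketch (an assumption, since only __init__ is shown):

    def hybrid_forward(self, F, x):
        x = self.conv2d(x)         # convolution, with optional built-in activation
        return self.norm_layer(x)  # normalize over the chosen axis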
Example #4
    def __init__(self,
                 num_init_features,
                 growth_rate,
                 block_config,
                 reduction,
                 bn_size,
                 downsample,
                 initial_layers="imagenet",
                 dropout=0,
                 classes=1000,
                 dilated=False,
                 config_string="DIDIDIDITDIDIDIDIDITDIDIDIDITDIDIDIDI",
                 **kwargs):
        HybridBlock.__init__(self, **kwargs)
        self.num_blocks = len(block_config)
        self.dilation = (1, 1, 2, 4) if dilated else (1, 1, 1, 1)
        self.downsample_struct = downsample
        self.bn_size = bn_size
        self.growth_rate = growth_rate
        self.dropout = dropout
        self.reduction_rates = reduction
        self.config_string = config_string

        # `is not` checks identity and is always True against a literal list;
        # an inequality comparison is what is intended here.
        if block_config != [-1, -1, -1, -1]:
            warnings.warn(
                "The MeliusNetCustom block_config constructor parameter is not [-1, -1, -1, -1]."
                " It exists only for backward compatibility and is no longer used; the"
                " configuration is read from config_string instead. Make sure you understand"
                " how the MeliusNetCustom class should be used.")

        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            add_initial_layers(initial_layers, self.features,
                               num_init_features)
            # Add dense blocks
            self.num_features = num_init_features
            if self.config_string.count("T") != 3:
                raise Exception(
                    "config_string must contain exactly 3 transition layers ('T')")
            self.meliusnet_block_configs = self.config_string.split('T')
            for i, block_string in enumerate(self.meliusnet_block_configs):
                self._make_repeated_base_blocks(block_string, i)
                # transitions sit between stages; count stages from the parsed
                # config_string, not from the deprecated block_config parameter
                if i != len(self.meliusnet_block_configs) - 1:
                    self._make_transition(i)
            self.finalize = nn.HybridSequential(prefix='')
            self.finalize.add(nn.BatchNorm())
            self.finalize.add(nn.Activation('relu'))
            if dilated:
                self.finalize.add(nn.AvgPool2D(pool_size=28))
            else:
                self.finalize.add(
                    nn.AvgPool2D(
                        pool_size=4 if initial_layers == "thumbnail" else 7))
            self.finalize.add(nn.Flatten())

            self.output = nn.Dense(classes)
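The config_string drives the whole architecture: splitting it on the three 'T' (transition) markers yields one block description per stage, where the letters presumably stand for MeliusNet's dense ('D') and improvement ('I') blocks. A quick standalone illustration with the default string:

cfg = "DIDIDIDITDIDIDIDIDITDIDIDIDITDIDIDIDI"
stages = cfg.split('T')               # one 'D'/'I' sequence per stage
assert cfg.count('T') == 3 and len(stages) == 4
print([len(s) // 2 for s in stages])  # [4, 5, 4, 4] dense/improvement pairs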
Example #5
    def __init__(self, _nfilters, _norm_type='BatchNorm', **kwards):
        HybridBlock.__init__(self, **kwards)

        with self.name_scope():

            # Convolution-based upsampling; no bias is needed since normalization follows.
            self.up = UpSample(_nfilters, _norm_type=_norm_type)

            self.conv_normed = Conv2DNormed(channels=_nfilters,
                                            kernel_size=(1, 1),
                                            padding=(0, 0),
                                            _norm_type=_norm_type)
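This block reads as the decoder-side "combine" step of a U-Net: upsample the deeper feature map, merge it with the encoder's skip connection, and fuse the result with a normed 1x1 convolution. A minimal sketch of the implied forward pass (an assumption; the argument names layer_lo and layer_hi are hypothetical):

    def hybrid_forward(self, F, layer_lo, layer_hi):
        up = F.relu(self.up(layer_lo))     # upsample the low-resolution features
        x = F.concat(up, layer_hi, dim=1)  # merge with the skip connection
        return self.conv_normed(x)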
Example #6
    def __init__(self, _nfilters, factor=2, _norm_type='BatchNorm', **kwards):
        HybridBlock.__init__(self, **kwards)

        self.factor = factor
        self.nfilters = _nfilters // self.factor

        with self.name_scope():
            self.convup_normed = Conv2DNormed(self.nfilters,
                                              kernel_size=(1, 1),
                                              _norm_type=_norm_type,
                                              prefix="_convdn_")
Example #7
    def __init__(self,
                 nfilters,
                 factor=2,
                 _norm_type='BatchNorm',
                 norm_groups=None,
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        self.factor = factor
        self.nfilters = nfilters // self.factor

        self.convup_normed = Conv2DNormed(self.nfilters,
                                          kernel_size=(1, 1),
                                          _norm_type=_norm_type,
                                          norm_groups=norm_groups)
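Examples #6 and #7 are two revisions of the same upsampling block: a 1x1 convolution first divides the channel count by factor, and the spatial upsampling presumably happens in the forward pass. A minimal sketch, assuming nearest-neighbour upsampling by that same factor:

    def hybrid_forward(self, F, x):
        x = self.convup_normed(x)  # channels: nfilters -> nfilters // factor
        # nearest-neighbour upsampling by `factor` (assumed behaviour)
        return F.UpSampling(x, scale=self.factor, sample_type='nearest')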
Example #8
    def __init__(self,
                 _nfilters,
                 _kernel_size=(3, 3),
                 _dilation_rate=(1, 1),
                 _norm_type='BatchNorm',
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        self.nfilters = _nfilters
        self.kernel_size = _kernel_size
        self.dilation_rate = _dilation_rate

        if (_norm_type == 'BatchNorm'):
            self.norm = gluon.nn.BatchNorm
            _prefix = "_BN"
        elif (_norm_type == 'InstanceNorm'):
            self.norm = gluon.nn.InstanceNorm
            _prefix = "_IN"
        elif (_norm_type == 'LayerNorm'):
            self.norm = gluon.nn.LayerNorm
            _prefix = "_LN"
        else:
            raise NotImplementedError

        with self.name_scope():

            # Ensures 'SAME' padding for odd kernel sizes
            p0 = self.dilation_rate[0] * (self.kernel_size[0] - 1) / 2
            p1 = self.dilation_rate[1] * (self.kernel_size[1] - 1) / 2
            p = (int(p0), int(p1))

            self.BN1 = self.norm(axis=1, prefix=_prefix + "1_")
            self.conv1 = gluon.nn.Conv2D(self.nfilters,
                                         kernel_size=self.kernel_size,
                                         padding=p,
                                         dilation=self.dilation_rate,
                                         use_bias=False,
                                         prefix="_conv1_")
            self.BN2 = self.norm(axis=1, prefix=_prefix + "2_")
            self.conv2 = gluon.nn.Conv2D(self.nfilters,
                                         kernel_size=self.kernel_size,
                                         padding=p,
                                         dilation=self.dilation_rate,
                                         use_bias=True,
                                         prefix="_conv2_")
Example #9
    def __init__(self, _nfilters, _factor=2, _norm_type='BatchNorm', **kwards):
        HybridBlock.__init__(self, **kwards)

        # Double the number of filters, since the spatial size is halved.
        self.factor = _factor
        self.nfilters = _nfilters * self.factor

        # A 1x1 kernel was used here previously; that has nothing to do with
        # max pooling or picking the dominant value, so it was changed.
        # kernel_size=2 crashed with an illegal memory access (unclear whether
        # a local bug or an mxnet one); kernel_size=3 with padding=1 works
        # fine in the latest version of mxnet.
        self.kernel_size = (3, 3)
        self.strides = (2, 2)
        self.pad = (1, 1)

        with self.name_scope():
            self.convdn = gluon.nn.Conv2D(self.nfilters,
                                          kernel_size=self.kernel_size,
                                          strides=self.strides,
                                          padding=self.pad,
                                          use_bias=False,
                                          prefix="_convdn_")
Example #10
    def __init__(self,
                 _nfilters_init,
                 _NClasses,
                 verbose=True,
                 _norm_type='BatchNorm',
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        self.model_name = "ResUNet_d6_encoder"

        self.depth = 6

        self.nfilters = _nfilters_init  # Initial number of filters
        self.NClasses = _NClasses

        with self.name_scope():

            # First convolution Layer
            # Starting with first convolutions to make the input "channel" dim equal to the number of initial filters
            self.conv_first_normed = Conv2DNormed(channels=self.nfilters,
                                                  kernel_size=(1, 1),
                                                  _norm_type=_norm_type,
                                                  prefix="_conv_first_")

            # Progressively reduce the dilation_rate of the atrous convolutions (the deeper, the smaller).

            # Usually 32
            nfilters = self.nfilters * 2**(0)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(0, nfilters))
            self.Dn1 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)
            self.pool1 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 64
            nfilters = self.nfilters * 2**(1)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(1, nfilters))
            self.Dn2 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)
            self.pool2 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 128
            nfilters = self.nfilters * 2**(2)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(2, nfilters))
            self.Dn3 = ResNet_atrous_2_unit(nfilters, _norm_type=_norm_type)
            self.pool3 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 256
            nfilters = self.nfilters * 2**(3)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(3, nfilters))
            self.Dn4 = ResNet_atrous_2_unit(nfilters,
                                            _dilation_rates=[3, 5],
                                            _norm_type=_norm_type)
            self.pool4 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 512
            nfilters = self.nfilters * 2**(4)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(4, nfilters))
            self.Dn5 = ResNet_v2_unit(nfilters, _norm_type=_norm_type)
            self.pool5 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 1024
            nfilters = self.nfilters * 2**(5)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(5, nfilters))
            self.Dn6 = ResNet_v2_unit(nfilters, _norm_type=_norm_type)

            # Same number of filters, with new definition
            self.middle = PSP_Pooling(nfilters, _norm_type=_norm_type)
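The constructor implies a straightforward encoder flow: each depth computes features (which a decoder would reuse as skip connections) and then downsamples before the next depth, with the PSP pooling applied at the bottom. A minimal sketch of such a hybrid_forward (an assumption; the returned tuple and names are hypothetical):

    def hybrid_forward(self, F, x):
        x = self.conv_first_normed(x)
        d1 = self.Dn1(x)
        d2 = self.Dn2(self.pool1(d1))
        d3 = self.Dn3(self.pool2(d2))
        d4 = self.Dn4(self.pool3(d3))
        d5 = self.Dn5(self.pool4(d4))
        d6 = self.middle(self.Dn6(self.pool5(d5)))
        return d1, d2, d3, d4, d5, d6  # skip connections plus the bottleneck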
Example #11
    def __init__(self,
                 _nfilters_init,
                 _NClasses,
                 verbose=True,
                 _norm_type='BatchNorm',
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        self.model_name = "ResUNet_d7_cmtskc"

        self.depth = 7

        self.nfilters = _nfilters_init  # Initial number of filters
        self.NClasses = _NClasses

        with self.name_scope():

            self.encoder = ResUNet_d7_encoder(self.nfilters,
                                              self.NClasses,
                                              _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 1)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(7, nfilters))
            self.UpComb1 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv1 = ResNet_v2_unit(nfilters, _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 2)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(8, nfilters))
            self.UpComb2 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv2 = ResNet_atrous_2_unit(nfilters,
                                                _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 3)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(9, nfilters))
            self.UpComb3 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv3 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 4)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(10, nfilters))
            self.UpComb4 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv4 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 5)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(11, nfilters))
            self.UpComb5 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv5 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 6)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(12, nfilters))
            self.UpComb6 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv6 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)

            self.psp_2ndlast = PSP_Pooling(self.nfilters,
                                           _norm_type=_norm_type)

            # Segmentation logits -- deeper for better reconstruction
            self.logits = gluon.nn.HybridSequential()
            self.logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.logits.add(gluon.nn.Activation('relu'))
            self.logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.logits.add(gluon.nn.Activation('relu'))
            self.logits.add(
                gluon.nn.Conv2D(self.NClasses, kernel_size=1, padding=0))

            # bound logits
            self.bound_logits = gluon.nn.HybridSequential()
            self.bound_logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.bound_logits.add(gluon.nn.Activation('relu'))
            self.bound_logits.add(
                gluon.nn.Conv2D(self.NClasses, kernel_size=1, padding=0))

            # distance logits -- deeper for better reconstruction
            self.distance_logits = gluon.nn.HybridSequential()
            self.distance_logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.distance_logits.add(gluon.nn.Activation('relu'))
            self.distance_logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.distance_logits.add(gluon.nn.Activation('relu'))
            self.distance_logits.add(
                gluon.nn.Conv2D(self.NClasses, kernel_size=1, padding=0))

            # This layer tries to identify the exact coloration on the HSV scale (as defined by cv2)
            self.color_logits = gluon.nn.Conv2D(3, kernel_size=1, padding=0)

            # Final activation, customized for binary (single-class) output
            if (self.NClasses == 1):
                self.ChannelAct = gluon.nn.HybridLambda(
                    lambda F, x: F.sigmoid(x))
            else:
                self.ChannelAct = gluon.nn.HybridLambda(
                    lambda F, x: F.softmax(x, axis=1))
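The final activation adapts to the task: a sigmoid for single-channel (binary) output, and a per-pixel softmax across the channel axis otherwise. A small standalone check of the multi-class branch (shapes are hypothetical):

import mxnet as mx

act = mx.gluon.nn.HybridLambda(lambda F, x: F.softmax(x, axis=1))
y = act(mx.nd.random.uniform(shape=(1, 6, 4, 4)))
print(y.sum(axis=1))  # ~1.0 at every pixel: a per-pixel class distribution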