Example #1
    def __init__(self, channels, kernel_size, strides=(1, 1),
                 padding=(0, 0), dilation=(1, 1), activation=None,
                 weight_initializer=None, in_channels=0, _norm_type='BatchNorm', axis=1, **kwards):
        HybridBlock.__init__(self, **kwards)

        if (_norm_type == 'BatchNorm'):
            self.norm = gluon.nn.BatchNorm
        elif (_norm_type == 'SyncBatchNorm'):
            self.norm = gluon.contrib.nn.SyncBatchNorm
            _prefix = "_SyncBN"
        elif (_norm_type == 'InstanceNorm'):
            self.norm = gluon.nn.InstanceNorm
        elif (_norm_type == 'LayerNorm'):
            self.norm = gluon.nn.LayerNorm
        else:
            raise NotImplementedError


        with self.name_scope():
            self.conv2d = gluon.nn.Conv2D(channels, kernel_size=kernel_size,
                                          strides=strides,
                                          padding=padding,
                                          dilation=dilation,
                                          activation=activation,
                                          use_bias=False,
                                          weight_initializer=weight_initializer,
                                          in_channels=in_channels)

            self.norm_layer = self.norm(axis=axis)
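
A minimal usage sketch, assuming this __init__ belongs to a Conv2DNormed HybridBlock whose hybrid_forward applies self.conv2d followed by self.norm_layer (the class name and forward behavior are assumptions, not shown above):

import mxnet as mx

# Hypothetical usage: 3x3 convolution with BatchNorm and 'SAME' padding at stride 1.
block = Conv2DNormed(channels=32, kernel_size=(3, 3), padding=(1, 1),
                     _norm_type='BatchNorm')
block.initialize()
x = mx.nd.random.uniform(shape=(1, 3, 64, 64))
y = block(x)  # expected shape (1, 32, 64, 64) if forward is norm(conv(x))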
Example #2
    def __init__(self,
                 nfilters,
                 nheads=1,
                 _norm_type='BatchNorm',
                 norm_groups=None,
                 ftdepth=5,
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        with self.name_scope():

            self.conv1 = Conv2DNormed(channels=nfilters,
                                      kernel_size=3,
                                      padding=1,
                                      groups=nheads,
                                      _norm_type=_norm_type,
                                      norm_groups=norm_groups,
                                      **kwards)  # restore help
            self.conv3 = Fusion(nfilters=nfilters,
                                kernel_size=3,
                                padding=1,
                                nheads=nheads,
                                norm=_norm_type,
                                norm_groups=norm_groups,
                                ftdepth=ftdepth,
                                **kwards)  # process
Example #3
    def __init__(self, _nfilters, _norm_type='BatchNorm', **kwards):
        HybridBlock.__init__(self, **kwards)

        self.nfilters = _nfilters

        # This is used as a container (list) of layers
        self.convs = gluon.nn.HybridSequential()
        with self.name_scope():

            # Four identical 1x1 reduction convolutions
            for i in range(1, 5):
                self.convs.add(
                    Conv2DNormed(self.nfilters // 4,
                                 kernel_size=(1, 1),
                                 padding=(0, 0),
                                 prefix="_conv%d_" % i))

        self.conv_norm_final = Conv2DNormed(channels=self.nfilters,
                                            kernel_size=(1, 1),
                                            padding=(0, 0),
                                            _norm_type=_norm_type)
Example #4
    def __init__(self,
                 num_init_features,
                 growth_rate,
                 block_config,
                 reduction,
                 bn_size,
                 downsample,
                 initial_layers="imagenet",
                 dropout=0,
                 classes=1000,
                 dilated=False,
                 config_string="DIDIDIDITDIDIDIDIDITDIDIDIDITDIDIDIDI",
                 **kwargs):
        HybridBlock.__init__(self, **kwargs)
        self.num_blocks = len(block_config)
        self.dilation = (1, 1, 2, 4) if dilated else (1, 1, 1, 1)
        self.downsample_struct = downsample
        self.bn_size = bn_size
        self.growth_rate = growth_rate
        self.dropout = dropout
        self.reduction_rates = reduction
        self.config_string = config_string

        if block_config != [-1, -1, -1, -1]:
            warnings.warn(
                "Attention, the MeliusNetCustom block_config constructor parameter is not [-1,-1,-1,-1]."
                " This parameter only exists for backward compatibility but isn't used anymore"
                " because the configuration is read from the config_string. Make sure you understand how the"
                " MeliusNetCustom class should be used.")

        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            add_initial_layers(initial_layers, self.features,
                               num_init_features)
            # Add dense blocks
            self.num_features = num_init_features
            if self.config_string.count("T") != 3:
                raise ValueError(
                    "config_string must contain exactly 3 transition layers")
            self.meliusnet_block_configs = self.config_string.split('T')
            for i, block_string in enumerate(self.meliusnet_block_configs):
                self._make_repeated_base_blocks(block_string, i)
                if i != len(self.meliusnet_block_configs) - 1:
                    self._make_transition(i)
            self.finalize = nn.HybridSequential(prefix='')
            self.finalize.add(nn.BatchNorm())
            self.finalize.add(nn.Activation('relu'))
            if dilated:
                self.finalize.add(nn.AvgPool2D(pool_size=28))
            else:
                self.finalize.add(
                    nn.AvgPool2D(
                        pool_size=4 if initial_layers == "thumbnail" else 7))
            self.finalize.add(nn.Flatten())

            self.output = nn.Dense(classes)
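
A quick sketch of how the config_string drives the architecture: 'T' marks the three transition layers, and the substrings between them name the base blocks of each dense stage (the semantics of 'D' and 'I' are defined elsewhere in MeliusNet):

config_string = "DIDIDIDITDIDIDIDIDITDIDIDIDITDIDIDIDI"
assert config_string.count("T") == 3
block_strings = config_string.split("T")
print(block_strings)
# ['DIDIDIDI', 'DIDIDIDIDI', 'DIDIDIDI', 'DIDIDIDI'] -- four stages, three transitions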
Example #5
def print_summary(net: gluon.HybridBlock, ipt_shape=(1, 3, 416, 416)):
    ctx = net.collect_params().list_ctx()[0]
    ipt = mx.random.uniform(shape=ipt_shape, ctx=ctx)
    net.summary(ipt)
    table = compute_net_params(net)
    logging.info("Parameter Statistics\n" + table.table)
    if wandb.run:
        headers = table.table_data[0]
        data = table.table_data[1:]
        wandb_table = wandb.Table(columns=headers, data=data)
        wandb.log({"Parameters Statistics": wandb_table}, commit=False)
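
A hedged usage sketch; compute_net_params and the wandb logging come from the surrounding project, so only the call pattern is shown. The helper expects an initialized network whose parameters live on a single context:

import mxnet as mx
from mxnet import gluon

net = gluon.nn.HybridSequential()
net.add(gluon.nn.Conv2D(8, kernel_size=3), gluon.nn.Dense(10))
net.initialize(ctx=mx.cpu())
print_summary(net, ipt_shape=(1, 3, 32, 32))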
Example #6
    def __init__(self, _nfilters, _norm_type='BatchNorm', **kwards):
        HybridBlock.__init__(self, **kwards)

        with self.name_scope():

            # This performs convolution, no BatchNormalization. No need for bias.
            self.up = UpSample(_nfilters, _norm_type=_norm_type)

            self.conv_normed = Conv2DNormed(channels=_nfilters,
                                            kernel_size=(1, 1),
                                            padding=(0, 0),
                                            _norm_type=_norm_type)
Example #7
    def __init__(self, _nfilters, factor=2, _norm_type='BatchNorm', **kwards):
        HybridBlock.__init__(self, **kwards)

        self.factor = factor
        self.nfilters = _nfilters // self.factor

        with self.name_scope():
            self.convup_normed = Conv2DNormed(self.nfilters,
                                              kernel_size=(1, 1),
                                              _norm_type=_norm_type,
                                              prefix="_convup_")
Example #8
    def __init__(self,
                 nfilters,
                 factor=2,
                 _norm_type='BatchNorm',
                 norm_groups=None,
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        self.factor = factor
        self.nfilters = nfilters // self.factor

        self.convup_normed = Conv2DNormed(self.nfilters,
                                          kernel_size=(1, 1),
                                          _norm_type=_norm_type,
                                          norm_groups=norm_groups)
Example #9
    def create_predictor(self, transformation: Transformation,
                         trained_network: HybridBlock) -> Predictor:
        prediction_network = GaussianProcessPredictionNetwork(
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            cardinality=self.cardinality,
            num_samples=self.num_eval_samples,
            params=trained_network.collect_params(),
            kernel_output=self.kernel_output,
            params_scaling=self.params_scaling,
            ctx=self.trainer.ctx,
            float_type=self.float_type,
            max_iter_jitter=self.max_iter_jitter,
            jitter_method=self.jitter_method,
            sample_noise=self.sample_noise,
        )

        copy_parameters(net_source=trained_network,
                        net_dest=prediction_network)

        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
            float_type=self.float_type,
        )
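
Examples #9 to #12 and #18 all follow the same GluonTS pattern: build a fresh prediction network that shares the trained network's weights via params=trained_network.collect_params(), optionally run copy_parameters as a safety net, and wrap the result in a RepresentableBlockPredictor. The sharing itself is plain Gluon behavior; a minimal sketch with stock blocks (illustrative, not GluonTS API):

import mxnet as mx
from mxnet import gluon

trained = gluon.nn.Dense(4)
trained.initialize()
x = mx.nd.ones((2, 8))
trained(x)  # first call triggers deferred shape inference

# Passing params= reuses the existing Parameter objects instead of
# allocating fresh ones, so the new block needs no (re)training.
pred = gluon.nn.Dense(4, params=trained.collect_params())
assert (trained(x) == pred(x)).asnumpy().all()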
Example #10
    def create_predictor(self, transformation: Transformation,
                         trained_network: HybridBlock) -> Predictor:
        prediction_splitter = self._create_instance_splitter("test")

        prediction_network = NBEATSPredictionNetwork(
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            num_stacks=self.num_stacks,
            widths=self.widths,
            num_blocks=self.num_blocks,
            num_block_layers=self.num_block_layers,
            expansion_coefficient_lengths=self.expansion_coefficient_lengths,
            sharing=self.sharing,
            stack_types=self.stack_types,
            params=trained_network.collect_params(),
            scale=self.scale,
        )

        return RepresentableBlockPredictor(
            input_transform=transformation + prediction_splitter,
            prediction_net=prediction_network,
            batch_size=self.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
Example #11
    def create_predictor(
        self, transformation: Transformation, trained_network: HybridBlock
    ) -> Predictor:
        prediction_network = DeepStatePredictionNetwork(
            num_sample_paths=self.num_sample_paths,
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            past_length=self.past_length,
            prediction_length=self.prediction_length,
            issm=self.issm,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            scaling=self.scaling,
            params=trained_network.collect_params(),
        )

        copy_parameters(trained_network, prediction_network)

        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
Example #12
    def create_predictor(self, transformation: Transformation,
                         trained_network: HybridBlock) -> Predictor:
        prediction_splitter = self._create_instance_splitter("test")

        prediction_network = DeepStatePredictionNetwork(
            num_layers=self.num_layers,
            num_cells=self.num_cells,
            cell_type=self.cell_type,
            past_length=self.past_length,
            prediction_length=self.prediction_length,
            issm=self.issm,
            dropout_rate=self.dropout_rate,
            cardinality=self.cardinality,
            embedding_dimension=self.embedding_dimension,
            scaling=self.scaling,
            num_parallel_samples=self.num_parallel_samples,
            noise_std_bounds=self.noise_std_bounds,
            prior_cov_bounds=self.prior_cov_bounds,
            innovation_bounds=self.innovation_bounds,
            params=trained_network.collect_params(),
        )

        copy_parameters(trained_network, prediction_network)

        return RepresentableBlockPredictor(
            input_transform=transformation + prediction_splitter,
            prediction_net=prediction_network,
            batch_size=self.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
Example #13
def export_mxnet(model: gluon.HybridBlock,
                 sample_input: List[mx.ndarray.NDArray]):
    model.hybridize()
    model(*sample_input)
    directory = "build"
    # Start from a clean build directory
    if os.path.exists(directory) and os.path.isdir(directory):
        shutil.rmtree(directory)
    os.mkdir(directory)
    model_path = os.path.join(directory, "mxmodel")
    # Create and store the model
    model.export(model_path)
    zip_files(directory, model_path)
    # Start creating template
    array = [ndarray_to_numpy(x) for x in sample_input]
    dest = get_djl_template("mxmodel.zip", array, add_deps())
    shutil.copy(model_path + ".zip", dest)
Example #14
    def __init__(self, nn: HybridBlock, freq: int):
        """
        :param nn: Model to plot parameters of.
        :param freq: Plotting frequency.
        """
        self._params = nn.collect_params()
        self._freq = freq
        self._last_call = 0
Example #15
    def __init__(self,
                 _nfilters,
                 _kernel_size=(3, 3),
                 _dilation_rate=(1, 1),
                 _norm_type='BatchNorm',
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        self.nfilters = _nfilters
        self.kernel_size = _kernel_size
        self.dilation_rate = _dilation_rate

        if (_norm_type == 'BatchNorm'):
            self.norm = gluon.nn.BatchNorm
            _prefix = "_BN"
        elif (_norm_type == 'InstanceNorm'):
            self.norm = gluon.nn.InstanceNorm
            _prefix = "_IN"
        elif (_norm_type == 'LayerNorm'):
            self.norm = gluon.nn.LayerNorm
            _prefix = "_LN"
        else:
            raise NotImplementedError

        with self.name_scope():

            # Ensures padding = 'SAME' for ODD kernel selection
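            # e.g. kernel 3 with dilation 1 gives p = 1, dilation 2 gives p = 2,
            # so the output spatial size equals the input size at stride 1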
            p0 = self.dilation_rate[0] * (self.kernel_size[0] - 1) / 2
            p1 = self.dilation_rate[1] * (self.kernel_size[1] - 1) / 2
            p = (int(p0), int(p1))

            self.BN1 = self.norm(axis=1, prefix=_prefix + "1_")
            self.conv1 = gluon.nn.Conv2D(self.nfilters,
                                         kernel_size=self.kernel_size,
                                         padding=p,
                                         dilation=self.dilation_rate,
                                         use_bias=False,
                                         prefix="_conv1_")
            self.BN2 = self.norm(axis=1, prefix=_prefix + "2_")
            self.conv2 = gluon.nn.Conv2D(self.nfilters,
                                         kernel_size=self.kernel_size,
                                         padding=p,
                                         dilation=self.dilation_rate,
                                         use_bias=True,
                                         prefix="_conv2_")
Example #16
def build_optimizer(cfg: dict, net: gluon.HybridBlock):
    lrs = build_lr_scheduler(cfg.pop('lr_scheduler', None))
    cfg.setdefault('optimizer_params', {})['lr_scheduler'] = lrs

    net.backbone.collect_params().setattr('lr_mult',
                                          cfg.pop('backbone_lr_mult', 1.0))
    net.backbone.collect_params().setattr('wd_mult',
                                          cfg.pop('backbone_wd_mult', 1.0))
    if cfg.pop('no_wd', False):
        net.collect_params('.*beta|.*gamma|.*bias').setattr('wd_mult', 0.0)

    opt = cfg.pop('type', 'sgd')
    optimizer_params = cfg.pop('optimizer_params', {})
    if amp._amp_initialized:
        cfg['update_on_kvstore'] = False
    trainer = gluon.Trainer(net.collect_params(),
                            opt,
                            optimizer_params=optimizer_params,
                            **cfg)
    if amp._amp_initialized:
        amp.init_trainer(trainer)
    return trainer
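
A hedged example of the cfg dict this helper consumes, assuming a net that exposes a .backbone sub-block as above; the key names mirror the pops in the function body, while the values are illustrative assumptions (note that build_optimizer mutates cfg in place via pop):

cfg = {
    'type': 'sgd',
    'lr_scheduler': None,  # or whatever build_lr_scheduler consumes
    'optimizer_params': {'learning_rate': 0.01, 'momentum': 0.9, 'wd': 1e-4},
    'backbone_lr_mult': 0.1,   # train the backbone with a smaller learning rate
    'backbone_wd_mult': 1.0,
    'no_wd': True,             # zero weight decay on beta/gamma/bias parameters
}
trainer = build_optimizer(cfg, net)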
Example #17
    def __init__(self, _nfilters, _factor=2, _norm_type='BatchNorm', **kwards):
        HybridBlock.__init__(self, **kwards)

        # Double the number of filters, since the block downscales spatially by 2.
        self.factor = _factor
        self.nfilters = _nfilters * self.factor
        # A 1x1 kernel was used here previously; that had nothing to do with max
        # pooling or selecting the most dominant value, so it was changed.
        # With kernel_size = 2 the code crashes with an illegal memory access;
        # unclear whether this is a local bug or an MXNet issue.
        # kernel_size = 3 with padding = 1 works fine in the latest MXNet version.
        self.kernel_size = (3, 3)
        self.strides = (2, 2)
        self.pad = (1, 1)

        with self.name_scope():
            self.convdn = gluon.nn.Conv2D(self.nfilters,
                                          kernel_size=self.kernel_size,
                                          strides=self.strides,
                                          padding=self.pad,
                                          use_bias=False,
                                          prefix="_convdn_")
Example #18
    def create_predictor(self, transformation: Transformation,
                         trained_network: HybridBlock) -> Predictor:
        prediction_network = SimpleFeedForwardPredictionNetwork(
            num_hidden_dimensions=self.num_hidden_dimensions,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            distr_output=self.distr_output,
            batch_normalization=self.batch_normalization,
            mean_scaling=self.mean_scaling,
            params=trained_network.collect_params(),
            num_parallel_samples=self.num_parallel_samples,
        )

        return RepresentableBlockPredictor(
            input_transform=transformation,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.prediction_length,
            ctx=self.trainer.ctx,
        )
Example #19
    def __init__(self, name: str, model: gluon.HybridBlock,
                 input_shapes: List[List[int]]):
        self._name = name
        self._sym = self._get_onnx_sym(model, len(input_shapes))
        self._param_dict = model.collect_params()
        self._input_shapes = input_shapes
Example #20
    def forward(self, x):
        self.layer_shape = x.shape
        return HybridBlock.forward(self, x)
Example #21
    def forward(self, x, t1_indices, t0_indices):
        self.input_shape = x.shape[0]

        return HybridBlock.forward(self, x, t1_indices, t0_indices)
Example #22
    def create_training_network(self) -> HybridBlock:
        return HybridBlock()
Example #23
    def __init__(self,
                 _nfilters_init,
                 _NClasses,
                 verbose=True,
                 _norm_type='BatchNorm',
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        self.model_name = "ResUNet_d7_cmtskc"

        self.depth = 7

        self.nfilters = _nfilters_init  # Initial number of filters
        self.NClasses = _NClasses

        with self.name_scope():

            self.encoder = ResUNet_d7_encoder(self.nfilters,
                                              self.NClasses,
                                              _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 1)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(7, nfilters))
            self.UpComb1 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv1 = ResNet_v2_unit(nfilters, _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 2)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(8, nfilters))
            self.UpComb2 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv2 = ResNet_atrous_2_unit(nfilters,
                                                _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 3)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(9, nfilters))
            self.UpComb3 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv3 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 4)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(10, nfilters))
            self.UpComb4 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv4 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 5)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(11, nfilters))
            self.UpComb5 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv5 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)

            nfilters = self.nfilters * 2**(self.depth - 1 - 6)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(12, nfilters))
            self.UpComb6 = combine_layers(nfilters, _norm_type=_norm_type)
            self.UpConv6 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)

            self.psp_2ndlast = PSP_Pooling(self.nfilters,
                                           _norm_type=_norm_type)

            # Segmentation logits -- deeper for better reconstruction
            self.logits = gluon.nn.HybridSequential()
            self.logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.logits.add(gluon.nn.Activation('relu'))
            self.logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.logits.add(gluon.nn.Activation('relu'))
            self.logits.add(
                gluon.nn.Conv2D(self.NClasses, kernel_size=1, padding=0))

            # bound logits
            self.bound_logits = gluon.nn.HybridSequential()
            self.bound_logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.bound_logits.add(gluon.nn.Activation('relu'))
            self.bound_logits.add(
                gluon.nn.Conv2D(self.NClasses, kernel_size=1, padding=0))

            # distance logits -- deeper for better reconstruction
            self.distance_logits = gluon.nn.HybridSequential()
            self.distance_logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.distance_logits.add(gluon.nn.Activation('relu'))
            self.distance_logits.add(
                Conv2DNormed(channels=self.nfilters,
                             kernel_size=(3, 3),
                             padding=(1, 1)))
            self.distance_logits.add(gluon.nn.Activation('relu'))
            self.distance_logits.add(
                gluon.nn.Conv2D(self.NClasses, kernel_size=1, padding=0))

            # This layer tries to identify the exact coloration on the HSV scale (as defined by cv2)
            self.color_logits = gluon.nn.Conv2D(3, kernel_size=1, padding=0)

            # Last activation, customization for binary results
            if (self.NClasses == 1):
                self.ChannelAct = gluon.nn.HybridLambda(
                    lambda F, x: F.sigmoid(x))
            else:
                self.ChannelAct = gluon.nn.HybridLambda(
                    lambda F, x: F.softmax(x, axis=1))
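
The final activation above uses HybridLambda, which lifts a (F, x) -> y function into a HybridBlock so it survives hybridize(); a minimal standalone sketch:

import mxnet as mx
from mxnet import gluon

channel_act = gluon.nn.HybridLambda(lambda F, x: F.softmax(x, axis=1))
channel_act.initialize()  # no parameters, but initialize() is harmless
probs = channel_act(mx.nd.random.uniform(shape=(1, 3, 4, 4)))  # per-pixel class probabilities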
Example #24
File: gluon.py Project: aarnphm/BentoML
def save(
    name: str,
    model: gluon.HybridBlock,
    *,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Optional[t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """
    Save a model instance to BentoML modelstore.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (`mxnet.gluon.HybridBlock`):
            Instance of gluon.HybridBlock model to be saved.
        labels (:code:`Dict[str, str]`, `optional`, defaults to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]`, `optional`, defaults to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, defaults to :code:`None`):
            Custom metadata for given model.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, defaults to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.

    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and `version` is generated by BentoML.

    Examples:

    .. code-block:: python

        import mxnet
        import mxnet.gluon as gluon
        import bentoml


        def train_gluon_classifier() -> gluon.nn.HybridSequential:
            net = mxnet.gluon.nn.HybridSequential()
            net.hybridize()
            net.forward(mxnet.nd.array(0))
            return net

        model = train_gluon_classifier()

        tag = bentoml.gluon.save("gluon_block", model)
    """  # noqa

    context: t.Dict[str, t.Any] = {
        "framework_name": "gluon",
        "pip_dependencies": [f"mxnet=={get_pkg_version('mxnet')}"],
    }
    options: t.Dict[str, t.Any] = dict()
    with bentoml.models.create(
            name,
            module=MODULE_NAME,
            labels=labels,
            custom_objects=custom_objects,
            options=options,
            context=context,
            metadata=metadata,
    ) as _model:

        model.export(_model.path_of(SAVE_NAMESPACE))

        return _model.tag
Example #25
    def __init__(self,
                 _nfilters_init,
                 _NClasses,
                 verbose=True,
                 _norm_type='BatchNorm',
                 **kwards):
        HybridBlock.__init__(self, **kwards)

        self.model_name = "ResUNet_d6_encoder"

        self.depth = 6

        self.nfilters = _nfilters_init  # Initial number of filters
        self.NClasses = _NClasses

        with self.name_scope():

            # First convolution layer: makes the input channel dimension
            # equal to the initial number of filters
            self.conv_first_normed = Conv2DNormed(channels=self.nfilters,
                                                  kernel_size=(1, 1),
                                                  _norm_type=_norm_type,
                                                  prefix="_conv_first_")

            # Progressively reducing the dilation_rate of Atrous convolutions (the deeper the smaller).

            # Usually 32
            nfilters = self.nfilters * 2**(0)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(0, nfilters))
            self.Dn1 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)
            self.pool1 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 64
            nfilters = self.nfilters * 2**(1)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(1, nfilters))
            self.Dn2 = ResNet_atrous_unit(nfilters, _norm_type=_norm_type)
            self.pool2 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 128
            nfilters = self.nfilters * 2**(2)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(2, nfilters))
            self.Dn3 = ResNet_atrous_2_unit(nfilters, _norm_type=_norm_type)
            self.pool3 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 256
            nfilters = self.nfilters * 2**(3)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(3, nfilters))
            self.Dn4 = ResNet_atrous_2_unit(nfilters,
                                            _dilation_rates=[3, 5],
                                            _norm_type=_norm_type)
            self.pool4 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 512
            nfilters = self.nfilters * 2**(4)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(4, nfilters))
            self.Dn5 = ResNet_v2_unit(nfilters, _norm_type=_norm_type)
            self.pool5 = DownSample(nfilters, _norm_type=_norm_type)

            # Usually 1024
            nfilters = self.nfilters * 2**(5)
            if verbose:
                print("depth:= {0}, nfilters: {1}".format(5, nfilters))
            self.Dn6 = ResNet_v2_unit(nfilters, _norm_type=_norm_type)

            # Same number of filters, with new definition
            self.middle = PSP_Pooling(nfilters, _norm_type=_norm_type)
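
For reference, the "Usually 32/64/..." comments follow directly from the doubling rule nfilters = _nfilters_init * 2**depth; a one-line check assuming _nfilters_init = 32:

print([32 * 2**d for d in range(6)])  # [32, 64, 128, 256, 512, 1024]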