# Example 1
    def __task_placeholder(self):
        """Create the graph input placeholders for the segmentation task.

        Registers three entries in ``self._placehold``:
          - 'image':    tensor of shape [batch, height, width, 1]
                        (single-channel input image).
          - 'GT':       tensor of shape [batch, height, width, class_amount]
                        (presumably one-hot ground truth — confirm upstream).
          - 'training': scalar bool switch for train vs. inference mode.

        The numeric dtype is resolved once from self._param['param_dtype']
        via tfu.tf_type and shared by 'image' and 'GT'.
        """
        # Resolve the dtype once instead of repeating the lookup per placeholder.
        dtype = tfu.tf_type(self._param['param_dtype']).Type

        self._placehold['image'] = tf.placeholder(
            dtype=dtype,
            shape=[None, None, None, 1],
            name='image')

        self._placehold['GT'] = tf.placeholder(
            dtype=dtype,
            shape=[None, None, None, self._param['class_amount']],
            name='GT')

        # Plain boolean flag; no dtype resolution needed.
        self._placehold['training'] = tf.placeholder(dtype=tf.bool,
                                                     name='training')
# Example 2
def __reducor_for_DenseUNet(output_map, training, params):
    """Apply the 'reducor' dense block between encoder and decoder.

    :param output_map: feature-map tensor coming out of the encoder.
    :param training: bool tensor/flag forwarded to the block layers.
    :param params: dict with keys 'param_dtype', 'regularize_weight',
        'grows', 'kernels' and 'layer_param' (the "BlockReducor" section
        of the DenseUNet config).
    :return: the transformed feature map.
    """
    param_dtype = tfu.tf_type(params.get('param_dtype')).Type
    regularize_weight = params.get('regularize_weight')
    grow = params.get('grows')
    kernel = params.get('kernels')
    layer_param = params.get('layer_param')
    # NOTE(review): this mutates the caller's config dict in place; harmless
    # as long as the same `training` tensor is always injected — confirm.
    layer_param['training'] = training
    return dmb.DenseNetBlockLayers(output_map, param_dtype, grow, 'reducor',
                                   regularize_weight, kernel, layer_param)
# Example 3
 def __build_Dense_UNet_with_loss(self):
     """Build the Dense-U-Net forward graph plus accuracy and loss ops.

     Wires the 'image'/'GT'/'training' placeholders into mb.DenseUNetPro
     and stores the resulting ops on the instance:
       - self._pro:  the network output map.
       - self._acc:  mb.fcn_acc of the output against the ground truth.
       - self._loss: mb.fcn_loss with the cost selected by
         self._param_loss['cost_name'], optionally weighted per class
         via the 'class_weights' entry (absent -> None).
     """
     output_map = mb.DenseUNetPro(
         self._placehold['image'],
         training=self._placehold['training'],
         class_amount=self._param['class_amount'],
         param_dtype=tfu.tf_type(self._param['param_dtype']).Type,
         regularizer_weight=self._param['regularizer_weight'],
         DenseUNetConfig=self._param_UNet)
     self._pro = output_map
     self._acc = mb.fcn_acc(output_map, self._placehold['GT'])
     # 'class_weights' is an optional loss parameter; default to None.
     self._loss = mb.fcn_loss(output_map,
                              self._placehold['GT'],
                              cost_name=self._param_loss['cost_name'],
                              param_dtype=self._param['param_dtype'],
                              class_weights=self._param_loss.get(
                                  'class_weights', None))
# Example 4
def __base_feature(output_map, params):
    """Initial 'stem' convolution producing the base feature maps.

    :param output_map: input image tensor.
    :param params: dict with keys 'feature_amount', 'kernel', 'stride',
        'param_dtype' and 'regularize_weight' (the "BaseFeature" section
        of the DenseUNet config).
    :return: ReLU-activated, L2-regularized conv2d output ('same' padding).
    """
    # `filters` rather than `filter` — avoid shadowing the builtin.
    filters = params.get('feature_amount')
    kernel = params.get('kernel')
    stride = params.get('stride')
    param_dtype = tfu.tf_type(params.get('param_dtype')).Type
    regularize_weight = params.get('regularize_weight')

    return tf.layers.conv2d(
        output_map,
        filters,
        kernel,
        stride,
        "same",
        activation=tf.nn.relu,
        kernel_initializer=tf.variance_scaling_initializer(mode='fan_avg',
                                                           dtype=param_dtype),
        kernel_regularizer=layers.l2_regularizer(regularize_weight),
        bias_initializer=tf.zeros_initializer(dtype=param_dtype),
        bias_regularizer=layers.l2_regularizer(regularize_weight),
        name='base')
# Example 5
def __DenseUNet(output_map, training, DenseUNetConfig):
    """Assemble the full Dense-U-Net graph.

    Pipeline: stem convolution -> dense encoder blocks -> 'reducor'
    bottleneck block -> transposed-conv dense decoder blocks, each decoder
    block concatenating the matching encoder block's output as a skip
    connection.

    :param output_map: input image tensor.
    :param training: bool tensor/flag for train vs. inference behavior.
    :param DenseUNetConfig: dict with four sections (each list holds one
        dict per encoder/decoder block; entries repeat the same schema):

        "BaseFeature" — stem conv params, e.g.
            {"feature_amount": 32, "kernel": [3, 3], "stride": [1, 1],
             "param_dtype": 0.32, "regularize_weight": 0.0001}

        "DenseNet" — encoder block list; each entry e.g.
            {"param_dtype": 0.32, "grows": [3, 3, 3],
             "regularize_weight": 0.0001,
             "kernels": [[3, 3], [3, 3], [3, 3]],
             "pool_kernel": [2, 2], "pool_stride": [2, 2],
             "pool_type": "max",
             "layer_param": {"batch_normal": true,
                             "activate_param": {"type": "ReLU"}},
             "transition_param": {"batch_normal": true,
                                  "activate_param": {"type": "ReLU"},
                                  "compress_rate": null,
                                  "dropout_rate": 0.1}}

        "DeDenseNet" — decoder block list; each entry e.g.
            {"param_dtype": 0.32, "grows": [3, 3, 3],
             "regularize_weight": 0.0001,
             "kernels": [[3, 3], [3, 3], [3, 3]],
             "t_kernel": [3, 3], "t_stride": [2, 2],
             "compress_rate": 0.3,
             "layer_param": {"batch_normal": true,
                             "activate": {"type": "ReLU"}},
             "transition_param": {"batch_normal": true,
                                  "activate_param": {"type": "ReLU"},
                                  "dropout_rate": 0.1}}

        "BlockReducor" — bottleneck block params, e.g.
            {"param_dtype": 0.32, "regularize_weight": 0.0001,
             "grows": [3, 2, 1],
             "kernels": [[1, 1], [2, 2], [3, 3]],
             "layer_param": {"batch_normal": true,
                             "activate": {"type": "ReLU"}}}

    :return: the decoder's final feature map.
    """
    BaseFeature = DenseUNetConfig.get('BaseFeature')
    DenseNetConfig = DenseUNetConfig.get('DenseNet')
    DeDenseNetConfig = DenseUNetConfig.get('DeDenseNet')
    BlockReducor = DenseUNetConfig.get('BlockReducor')

    # Stem convolution.
    output_map = __base_feature(output_map, BaseFeature)

    # Encoder. `cl` records each encoder block's layer outputs so the decoder
    # can reuse them as skip connections.
    cl = dmb.DenseNetProvide()
    cld = dmb.DeDenseNetProvide()
    output_map = dmb.DenseNetFromParamDict(output_map,
                                           training,
                                           DenseNetConfig,
                                           dense_net_provide=cl,
                                           block_name_flag='encode-')
    # Reverse so the deepest encoder block pairs with the first decoder block.
    cl.BlockLayer.reverse()

    # Bottleneck between encoder and decoder.
    output_map = __reducor_for_DenseUNet(output_map, training, BlockReducor)

    # Decoder: per block — upsample (transposed conv), concat the paired
    # encoder skip connection, then run a dense block.
    for de_block_name, (encode_layers, block_cfg) in enumerate(
            zip(cl.BlockLayer, DeDenseNetConfig)):
        param_dtype = tfu.tf_type(block_cfg.get('param_dtype')).Type
        grows = block_cfg.get('grows')
        regularize_weight = block_cfg.get('regularize_weight')
        kernels = block_cfg.get('kernels')
        t_kernel = block_cfg.get('t_kernel')
        t_stride = block_cfg.get('t_stride')
        compress_rate = block_cfg.get('compress_rate')
        layer_param = block_cfg.get('layer_param')
        # NOTE(review): both dicts are mutated in place in the caller's config.
        layer_param['training'] = training
        transition_param = block_cfg.get('transition_param')
        transition_param['training'] = training
        # Skip connection: last layer output of the paired encoder block.
        to_concat = encode_layers[-1]
        cld.push_block()
        output_map = dmb.DeDenseNetBlockTransition(
            output_map, param_dtype,
            'decode_{0}_{1}'.format(de_block_name, 'transition'),
            regularize_weight, t_kernel, t_stride, compress_rate,
            **transition_param)
        output_map = tf.concat([to_concat, output_map],
                               axis=-1,
                               name='decode_{0}_{1}'.format(
                                   de_block_name, 'concat'))
        cld.push_transition(output_map)
        output_map = dmb.DeDenseNetBlockLayers(
            output_map,
            param_dtype,
            grows,
            'decode_{0}_{1}'.format(de_block_name, 'block_layer'),
            regularize_weight,
            kernels,
            layer_param,
        )
        cld.push_block_layer(output_map)
    return output_map