Code example #1
0
def SemanticFeatureGenerationBlock(inputs, D_features, D_prime_features, O_features, bottleneck_factor=2, cardinality=32):
    """
    Semantic feature generation block.

    Runs four cascaded (3x3 conv -> 5x5 stride-1 max-pool -> 3x3 conv) stages,
    concatenates the four D'-channel stage outputs, fuses them with a 3x3 conv,
    refines with four ResNeXt blocks, and projects to O_features channels.

    Arguments:
      inputs: the input tensor
      D_features: channel count for the intra-stage and fusion convs
      D_prime_features: channel count of each stage's contribution to the concat
      O_features: channel count of the returned tensor
      bottleneck_factor: bottleneck factor forwarded to each ResNeXtBlock
      cardinality: accepted but currently unused in this body — TODO confirm
        whether it should be forwarded to ResNeXtBlock

    Returns:
      The O_features-channel output tensor.
    """
    stream = inputs
    stage_outputs = []
    # Each stage feeds the next via its pooled tensor and contributes one
    # D'-channel map to the fusion concat.
    for _ in range(4):
        d = ConvBlock(stream, D_features, kernel_size=[3, 3])
        stream = slim.pool(d, [5, 5], stride=[1, 1], pooling_type='MAX')
        stage_outputs.append(ConvBlock(stream, D_prime_features, kernel_size=[3, 3]))

    net = tf.concat(stage_outputs, axis=-1)
    net = ConvBlock(net, n_filters=D_features, kernel_size=[3, 3])

    # Four ResNeXt refinement blocks at constant width.
    for _ in range(4):
        net = ResNeXtBlock(net, n_filters_out=D_features, bottleneck_factor=bottleneck_factor)

    net = ConvBlock(net, O_features, kernel_size=[3, 3])
    return net
Code example #2
0
def FullResolutionResidualUnit(pool_stream, res_stream, n_filters_3, n_filters_1, pool_scale):
    """
    A full resolution residual unit.

    Arguments:
      pool_stream: the inputs from the pooling stream
      res_stream: the inputs from the residual stream
      n_filters_3: number of output feature maps for each 3x3 conv
      n_filters_1: number of output feature maps for each 1x1 conv
      pool_scale: scale of the pooling layer, i.e. window size and stride

    Returns:
      (pool_stream_out, res_stream_out) of the full resolution residual block
    """
    # Bring the residual stream down to the pooling stream's resolution,
    # then fuse the two streams along the channel axis.
    downsampled = slim.pool(res_stream, [pool_scale, pool_scale],
                            stride=[pool_scale, pool_scale], pooling_type='MAX')
    fused = tf.concat([pool_stream, downsampled], axis=-1)

    # Two 3x3 conv -> batch-norm -> ReLU stages on the fused tensor.
    x = fused
    for _ in range(2):
        x = slim.conv2d(x, n_filters_3, kernel_size=3, activation_fn=None)
        x = slim.batch_norm(x, fused=True)
        x = tf.nn.relu(x)
    pool_stream_out = x

    # Project back to the residual stream's width, restore its resolution,
    # and add as a residual.
    projected = slim.conv2d(pool_stream_out, n_filters_1, kernel_size=1, activation_fn=None)
    res_stream_out = tf.add(res_stream, Upsampling(projected, scale=pool_scale))

    return pool_stream_out, res_stream_out
Code example #3
0
def TransitionDown(inputs, n_filters, dropout_p=0.2, scope=None):
    """
    Transition Down (TD) for FC-DenseNet.

    Applies a 1x1 BN + ReLU + conv (via preact_conv) and then 2x2 max pooling.

    Arguments:
      inputs: the input tensor
      n_filters: number of output feature maps for the 1x1 conv
      dropout_p: dropout rate forwarded to preact_conv
      scope: optional name scope for the ops

    Returns:
      The pooled tensor at half the spatial resolution.
    """
    with tf.name_scope(scope):
        preactivated = preact_conv(inputs, n_filters, kernel_size=[1, 1], dropout_p=dropout_p)
        return slim.pool(preactivated, [2, 2], stride=[2, 2], pooling_type='MAX')
Code example #4
0
def InterpBlock(net, level, feature_map_shape, pooling_type):
    """
    Pyramid-pooling interpolation block: pool to roughly (level x level),
    apply a 1x1 conv + BN + ReLU, and upsample back to feature_map_shape.

    NOTE(review): the pooling_type parameter is accepted but unused here —
    pooling is hard-coded to 'MAX'; confirm against callers.
    """
    # With equal kernel and stride, pooling divides each spatial dimension by
    # the kernel size, so kernel = round(dim / level) yields a final feature
    # map of roughly level x level (1x1, 2x2, 3x3, or 6x6).
    kernel_size = [int(np.round(float(dim) / float(level))) for dim in feature_map_shape[:2]]

    net = slim.pool(net, kernel_size, stride=kernel_size, pooling_type='MAX')
    net = slim.conv2d(net, 512, [1, 1], activation_fn=None)
    net = slim.batch_norm(net, fused=True)
    net = tf.nn.relu(net)
    return Upsampling_by_shape(net, feature_map_shape)
Code example #5
0
def build_encoder_decoder(inputs,
                          num_classes,
                          preset_model="Encoder-Decoder",
                          dropout_p=0.5,
                          scope=None):
    """
    Builds the Encoder-Decoder model. Inspired by SegNet with some modifications.
    Optionally includes skip connections.

    Arguments:
      inputs: the input tensor
      num_classes: number of classes
      preset_model: "Encoder-Decoder" or "Encoder-Decoder-Skip"
      dropout_p: dropout rate applied after each convolution (0. for not using)
        NOTE(review): accepted but unused in this body, as is `scope`.

    Returns:
      Encoder-Decoder model logits tensor.
    """
    if preset_model == "Encoder-Decoder":
        has_skip = False
    elif preset_model == "Encoder-Decoder-Skip":
        has_skip = True
    else:
        raise ValueError(
            "Unsupported Encoder-Decoder model '%s'. This function only supports Encoder-Decoder and Encoder-Decoder-Skip"
            % (preset_model))

    #####################
    # Downsampling path #
    #####################
    # Each stage: n_convs conv blocks at n_filters, then a 2x2 max-pool.
    skips = []
    net = inputs
    for n_filters, n_convs in [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]:
        for _ in range(n_convs):
            net = conv_block(net, n_filters)
        net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
        skips.append(net)
    # The deepest pooled tensor is the decoder input, not a skip connection.
    skip_1, skip_2, skip_3, skip_4, _ = skips

    #####################
    # Upsampling path #
    #####################
    # Each stage: one transpose conv, a run of conv blocks, and (optionally)
    # an element-wise skip addition.
    up_stages = [
        (512, [512, 512, 512], skip_4),
        (512, [512, 512, 256], skip_3),
        (256, [256, 256, 128], skip_2),
        (128, [128, 64], skip_1),
        (64, [64, 64], None),
    ]
    for transpose_filters, conv_filters, skip in up_stages:
        net = conv_transpose_block(net, transpose_filters)
        for n_filters in conv_filters:
            net = conv_block(net, n_filters)
        if has_skip and skip is not None:
            net = tf.add(net, skip)

    #####################
    #      Softmax      #
    #####################
    net = slim.conv2d(net,
                      num_classes, [1, 1],
                      activation_fn=None,
                      scope='logits')
    return net
Code example #6
0
def build_frrn(inputs, num_classes, preset_model='FRRN-A'):
    """
    Builds the Full Resolution Residual Network model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select FRRN-A or FRRN-B
      num_classes: Number of classes

    Returns:
      FRRN model logits tensor.
    """
    # Per-model schedule of FullResolutionResidualUnit stages.
    # Each tuple is (n_filters_3, pool_scale, n_units). On the downsampling
    # path a 2x2 max-pool precedes every stage (the first stage's pool is the
    # initial stream split below); on the upsampling path an Unpooling(2)
    # precedes every stage.
    #
    # BUGFIX: the original FRRN-B code passed pool_scale=17 to the first
    # upsampling FRRU. After Unpooling the pool stream is at 1/16 resolution,
    # so the scale must be 16 (as its sibling unit used) — 17 would misalign
    # the FRRU's internal Upsampling and break the tf.add shape match.
    model_stages = {
        'FRRN-A': {
            'down': [(96, 2, 3), (192, 4, 4), (384, 8, 2), (384, 16, 2)],
            'up': [(192, 8, 2), (192, 4, 2), (96, 2, 2)],
        },
        'FRRN-B': {
            'down': [(96, 2, 3), (192, 4, 4), (384, 8, 2), (384, 16, 2), (384, 32, 2)],
            'up': [(192, 16, 2), (192, 8, 2), (192, 4, 2), (96, 2, 2)],
        },
    }
    if preset_model not in model_stages:
        raise ValueError("Unsupported FRRN model '%s'. This function only supports FRRN-A and FRRN-B" % (preset_model))
    stages = model_stages[preset_model]

    #####################
    # Initial Stage
    #####################
    net = slim.conv2d(inputs, 48, kernel_size=5, activation_fn=None)
    net = slim.batch_norm(net, fused=True)
    net = tf.nn.relu(net)

    for _ in range(3):
        net = ResidualUnit(net, n_filters=48, filter_size=3)

    #####################
    # Downsampling Path
    #####################
    # Split into a downsampled pooling stream and a full-resolution
    # residual stream (reduced to 32 channels).
    pool_stream = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    res_stream = slim.conv2d(net, 32, kernel_size=1, activation_fn=None)

    first_stage = True
    for n_filters_3, pool_scale, n_units in stages['down']:
        if not first_stage:
            pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX')
        first_stage = False
        for _ in range(n_units):
            pool_stream, res_stream = FullResolutionResidualUnit(
                pool_stream=pool_stream, res_stream=res_stream,
                n_filters_3=n_filters_3, n_filters_1=32, pool_scale=pool_scale)

    #####################
    # Upsampling Path
    #####################
    for n_filters_3, pool_scale, n_units in stages['up']:
        pool_stream = Unpooling(pool_stream, 2)
        for _ in range(n_units):
            pool_stream, res_stream = FullResolutionResidualUnit(
                pool_stream=pool_stream, res_stream=res_stream,
                n_filters_3=n_filters_3, n_filters_1=32, pool_scale=pool_scale)

    # Final unpooling returns the pool stream to full resolution.
    pool_stream = Unpooling(pool_stream, 2)

    #####################
    # Final Stage
    #####################
    net = tf.concat([pool_stream, res_stream], axis=-1)
    for _ in range(3):
        net = ResidualUnit(net, n_filters=48, filter_size=3)

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net
Code example #7
0
def build_encoder_decoder_skip(inputs, num_classes, dropout_p=0.5, scope=None):
    """
    Builds the Encoder-Decoder-Skip model. Inspired by SegNet with some
    modifications. Includes skip connections.

    Arguments:
      inputs: the input tensor
      num_classes: number of classes
      dropout_p: dropout rate applied after each convolution (0. for not using)
        NOTE(review): accepted but unused in this body, as is `scope`.

    Returns:
      Encoder-Decoder model logits tensor.
    """
    #####################
    # Downsampling path #
    #####################
    net = conv_block(inputs, 64)
    net = conv_block(net, 64)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    skip_1 = net

    net = conv_block(net, 128)
    net = conv_block(net, 128)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    skip_2 = net

    net = conv_block(net, 256)
    net = conv_block(net, 256)
    net = conv_block(net, 256)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    skip_3 = net

    net = conv_block(net, 512)
    net = conv_block(net, 512)
    net = conv_block(net, 512)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    skip_4 = net

    net = conv_block(net, 512)
    net = conv_block(net, 512)
    net = conv_block(net, 512)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')

    #####################
    # Upsampling path #
    #####################
    net = conv_transpose_block(net, 512)
    net = conv_block(net, 512)
    net = conv_block(net, 512)
    net = conv_block(net, 512)
    net = tf.add(net, skip_4)

    net = conv_transpose_block(net, 512)
    net = conv_block(net, 512)
    net = conv_block(net, 512)
    net = conv_block(net, 256)
    net = tf.add(net, skip_3)

    net = conv_transpose_block(net, 256)
    net = conv_block(net, 256)
    net = conv_block(net, 256)
    net = conv_block(net, 128)
    net = tf.add(net, skip_2)

    net = conv_transpose_block(net, 128)
    net = conv_block(net, 128)
    net = conv_block(net, 64)
    net = tf.add(net, skip_1)

    net = conv_transpose_block(net, 64)
    net = conv_block(net, 64)
    net = conv_block(net, 64)

    #####################
    #      Softmax      #
    #####################
    # BUGFIX: activation_fn=None so raw logits are returned. slim.conv2d
    # defaults to ReLU, which would clamp negative logits — every sibling
    # builder in this file passes activation_fn=None on its logits conv.
    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net
Code example #8
0
def build_mobile_unet(inputs, preset_model, num_classes):
    """
    Builds the MobileUNet model: a SegNet-style encoder-decoder built from
    depthwise-separable convolution blocks, with optional skip connections.

    Arguments:
      inputs: the input tensor
      preset_model: "MobileUNet" or "MobileUNet-Skip" (enables skip additions)
      num_classes: number of classes

    Returns:
      MobileUNet model logits tensor.

    Raises:
      ValueError: if preset_model is not one of the supported names.

    BUGFIX: the original body mixed tab indentation with a space-indented
    comment line, which raises TabError/IndentationError under Python 3;
    re-indented uniformly with spaces. Behavior is unchanged.
    """
    has_skip = False
    if preset_model == "MobileUNet":
        has_skip = False
    elif preset_model == "MobileUNet-Skip":
        has_skip = True
    else:
        raise ValueError("Unsupported MobileUNet model '%s'. This function only supports MobileUNet and MobileUNet-Skip" % (preset_model))

    #####################
    # Downsampling path #
    #####################
    net = ConvBlock(inputs, 64)
    net = DepthwiseSeparableConvBlock(net, 64)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    skip_1 = net

    net = DepthwiseSeparableConvBlock(net, 128)
    net = DepthwiseSeparableConvBlock(net, 128)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    skip_2 = net

    net = DepthwiseSeparableConvBlock(net, 256)
    net = DepthwiseSeparableConvBlock(net, 256)
    net = DepthwiseSeparableConvBlock(net, 256)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    skip_3 = net

    net = DepthwiseSeparableConvBlock(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
    skip_4 = net

    net = DepthwiseSeparableConvBlock(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')

    #####################
    # Upsampling path #
    #####################
    net = conv_transpose_block(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    if has_skip:
        net = tf.add(net, skip_4)

    net = conv_transpose_block(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    net = DepthwiseSeparableConvBlock(net, 512)
    net = DepthwiseSeparableConvBlock(net, 256)
    if has_skip:
        net = tf.add(net, skip_3)

    net = conv_transpose_block(net, 256)
    net = DepthwiseSeparableConvBlock(net, 256)
    net = DepthwiseSeparableConvBlock(net, 256)
    net = DepthwiseSeparableConvBlock(net, 128)
    if has_skip:
        net = tf.add(net, skip_2)

    net = conv_transpose_block(net, 128)
    net = DepthwiseSeparableConvBlock(net, 128)
    net = DepthwiseSeparableConvBlock(net, 64)
    if has_skip:
        net = tf.add(net, skip_1)

    net = conv_transpose_block(net, 64)
    net = DepthwiseSeparableConvBlock(net, 64)
    net = DepthwiseSeparableConvBlock(net, 64)

    #####################
    #      Softmax      #
    #####################
    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net
Code example #9
0
File: AdapNet.py  Project: DavidKleindienst/Darea
def build_adaptnet(inputs, num_classes):
    """
    Builds the AdaptNet model.

    Arguments:
      inputs: the input tensor
      num_classes: number of classes

    Returns:
      AdaptNet model logits tensor (upsampled to input resolution).
    """
    # Stem: 3x3 conv, stride-2 7x7 conv, then a 2x2 max-pool (1/4 resolution).
    net = ConvBlock(inputs, n_filters=64, kernel_size=[3, 3])
    net = ConvBlock(net, n_filters=64, kernel_size=[7, 7], stride=2)
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')

    # ResNet stage 1 (64 -> 256 channels).
    net = ResNetBlock_2(net, filters_1=64, filters_2=256, s=1)
    for _ in range(2):
        net = ResNetBlock_1(net, filters_1=64, filters_2=256)

    # ResNet stage 2 (128 -> 512 channels, stride 2).
    net = ResNetBlock_2(net, filters_1=128, filters_2=512, s=2)
    for _ in range(2):
        net = ResNetBlock_1(net, filters_1=128, filters_2=512)

    # Low-level skip branch, reduced to the class-projection width.
    skip_connection = ConvBlock(net, n_filters=12, kernel_size=[1, 1])

    net = MultiscaleBlock_1(net, filters_1=128, filters_2=512, filters_3=64, p=1, d=2)

    # ResNet stage 3 (256 -> 1024 channels, stride 2) followed by a series of
    # multiscale blocks with growing dilation rates.
    net = ResNetBlock_2(net, filters_1=256, filters_2=1024, s=2)
    net = ResNetBlock_1(net, filters_1=256, filters_2=1024)
    for dilation in (2, 4, 8, 16):
        net = MultiscaleBlock_1(net, filters_1=256, filters_2=1024,
                                filters_3=64, p=1, d=dilation)

    # Widest multiscale stage (512 -> 2048 channels).
    net = MultiscaleBlock_2(net, filters_1=512, filters_2=2048, filters_3=512, p=2, d=4)
    for dilation in (8, 16):
        net = MultiscaleBlock_1(net, filters_1=512, filters_2=2048,
                                filters_3=512, p=2, d=dilation)

    # Project to 12 channels, upsample x2, and fuse with the skip branch.
    net = ConvBlock(net, n_filters=12, kernel_size=[1, 1])
    net = Upsampling(net, scale=2)
    net = tf.add(skip_connection, net)

    # Restore full input resolution before the final class projection.
    net = Upsampling(net, scale=8)
    net = slim.conv2d(net,
                      num_classes, [1, 1],
                      activation_fn=None,
                      scope='logits')
    return net