def build_refinenet(inputs, num_classes, preset_model='RefineNet-Res101', weight_decay=1e-5, is_training=True, upscaling_method="bilinear", pretrained_dir="models"):
    """
    Builds the RefineNet model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes

    Returns:
      RefineNet model
    """

    if preset_model == 'RefineNet-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'RefineNet-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'RefineNet-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))

    high = [end_points['pool5'], end_points['pool4'], end_points['pool3'], end_points['pool2']]
    low = [None, None, None, None]

    # Reduce the backbone feature maps to the proper depth with 1x1 bottleneck convolutions
    high[0] = slim.conv2d(high[0], 512, 1)
    high[1] = slim.conv2d(high[1], 256, 1)
    high[2] = slim.conv2d(high[2], 256, 1)
    high[3] = slim.conv2d(high[3], 256, 1)

    # RefineNet
    low[0] = RefineBlock(high_inputs=high[0], low_inputs=None)  # Only input ResNet 1/32
    low[1] = RefineBlock(high[1], low[0])  # High input = ResNet 1/16, Low input = Previous 1/32
    low[2] = RefineBlock(high[2], low[1])  # High input = ResNet 1/8, Low input = Previous 1/16
    low[3] = RefineBlock(high[3], low[2])  # High input = ResNet 1/4, Low input = Previous 1/8

    net = low[3]

    if upscaling_method.lower() == "conv":
        net = ConvUpscaleBlock(net, 128, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 128)
        net = ConvUpscaleBlock(net, 64, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 64)
    elif upscaling_method.lower() == "bilinear":
        net = Upsampling(net, scale=4)

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net, init_fn
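
# Example usage (a minimal sketch, not part of the model definition): assumes
# TensorFlow 1.x with `tf`, `slim`, and this module's helpers imported, plus a
# `models/` directory holding the matching ResNet v2 checkpoint. The input
# shape and class count below are placeholders.
#
#   inputs = tf.placeholder(tf.float32, shape=[None, 512, 512, 3])
#   net, init_fn = build_refinenet(inputs, num_classes=12,
#                                  preset_model='RefineNet-Res101')
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       init_fn(sess)  # restore the pre-trained ResNet backbone weights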
def build_deeplabv3_plus(inputs, num_classes, preset_model='DeepLabV3_plus-Res50', weight_decay=1e-5, is_training=True, pretrained_dir="models"):
    """
    Builds the DeepLabV3+ model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes

    Returns:
      DeepLabV3+ model
    """

    if preset_model == 'DeepLabV3_plus-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            resnet_scope = 'resnet_v2_50'
            # DeepLabV3+ requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'DeepLabV3_plus-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            resnet_scope = 'resnet_v2_101'
            # DeepLabV3+ requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'DeepLabV3_plus-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            resnet_scope = 'resnet_v2_152'
            # DeepLabV3+ requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))

    label_size = tf.shape(inputs)[1:3]

    encoder_features = end_points['pool2']

    net = AtrousSpatialPyramidPoolingModule(end_points['pool4'])
    decoder_features = Upsampling(net, label_size // 4)

    encoder_features = slim.conv2d(encoder_features, 48, [1, 1], activation_fn=tf.nn.relu, normalizer_fn=None)

    net = tf.concat((encoder_features, decoder_features), axis=3)

    net = slim.conv2d(net, 256, [3, 3], activation_fn=tf.nn.relu, normalizer_fn=None)
    net = slim.conv2d(net, 256, [3, 3], activation_fn=tf.nn.relu, normalizer_fn=None)

    net = Upsampling(net, label_size)

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net, init_fn
def build_deeplabv3(inputs, num_classes, preset_model='DeepLabV3-Res50', upscaling_method="bilinear", weight_decay=1e-5, is_training=True, pretrained_dir="models"):
    """
    Builds the DeepLabV3 model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes

    Returns:
      DeepLabV3 model
    """

    inputs = mean_image_subtraction(inputs)

    if preset_model == 'DeepLabV3-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            resnet_scope = 'resnet_v2_50'
            # DeepLabV3 requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'DeepLabV3-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            resnet_scope = 'resnet_v2_101'
            # DeepLabV3 requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'DeepLabV3-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            resnet_scope = 'resnet_v2_152'
            # DeepLabV3 requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))

    label_size = tf.shape(inputs)[1:3]

    net = AtrousSpatialPyramidPoolingModule(end_points['pool5'])

    if upscaling_method.lower() == "conv":
        net = ConvUpscaleBlock(net, 256, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 256)
        net = ConvUpscaleBlock(net, 128, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 128)
        net = ConvUpscaleBlock(net, 64, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 64)
    elif upscaling_method.lower() == "bilinear":
        net = Upsampling(net, label_size)

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net, init_fn
def build_pspnet(inputs, label_size, num_classes, preset_model='PSPNet-Res50', pooling_type="MAX", weight_decay=1e-5, upscaling_method="bilinear", is_training=True, pretrained_dir="models"):
    """
    Builds the PSPNet model.

    Arguments:
      inputs: The input tensor
      label_size: Size of the final label tensor. We need to know this for proper upscaling
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes
      pooling_type: Max or Average pooling

    Returns:
      PSPNet model
    """

    inputs = mean_image_subtraction(inputs)

    if preset_model == 'PSPNet-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            resnet_scope = 'resnet_v2_50'
            # PSPNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'PSPNet-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            resnet_scope = 'resnet_v2_101'
            # PSPNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'PSPNet-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            resnet_scope = 'resnet_v2_152'
            # PSPNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))

    # The pyramid pooling windows are sized relative to the 1/8-resolution feature map
    feature_map_shape = [int(x / 8.0) for x in label_size]
    print(feature_map_shape)
    psp = PyramidPoolingModule(end_points['pool3'], feature_map_shape=feature_map_shape, pooling_type=pooling_type)

    net = slim.conv2d(psp, 512, [3, 3], activation_fn=None)
    net = slim.batch_norm(net, fused=True)
    net = tf.nn.relu(net)

    if upscaling_method.lower() == "conv":
        net = ConvUpscaleBlock(net, 256, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 256)
        net = ConvUpscaleBlock(net, 128, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 128)
        net = ConvUpscaleBlock(net, 64, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 64)
    elif upscaling_method.lower() == "bilinear":
        net = Upsampling(net, label_size)

    net = slim.dropout(net, keep_prob=(0.9))

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net, init_fn
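
# Example usage (a minimal sketch, not part of the model definition): unlike
# the other builders, build_pspnet needs `label_size` at graph-construction
# time so the pyramid pooling windows can be computed statically. The crop
# size and class count below are placeholders.
#
#   inputs = tf.placeholder(tf.float32, shape=[None, 512, 512, 3])
#   net, init_fn = build_pspnet(inputs, label_size=[512, 512], num_classes=12,
#                               preset_model='PSPNet-Res50', pooling_type="MAX")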
def build_gcn(inputs, num_classes, preset_model='GCN-Res101', weight_decay=1e-5, is_training=True, upscaling_method="bilinear", pretrained_dir="models"):
    """
    Builds the GCN model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes

    Returns:
      GCN model
    """

    if preset_model == 'GCN-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            resnet_scope = 'resnet_v2_50'
            # GCN requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'GCN-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            resnet_scope = 'resnet_v2_101'
            # GCN requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'GCN-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            resnet_scope = 'resnet_v2_152'
            # GCN requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))

    res = [end_points['pool5'], end_points['pool4'], end_points['pool3'], end_points['pool2']]

    down_5 = GlobalConvBlock(res[0], n_filters=21, size=3)
    down_5 = BoundaryRefinementBlock(down_5, n_filters=21, kernel_size=[3, 3])
    down_5 = ConvUpscaleBlock(down_5, n_filters=21, kernel_size=[3, 3], scale=2)

    down_4 = GlobalConvBlock(res[1], n_filters=21, size=3)
    down_4 = BoundaryRefinementBlock(down_4, n_filters=21, kernel_size=[3, 3])
    down_4 = tf.add(down_4, down_5)
    down_4 = BoundaryRefinementBlock(down_4, n_filters=21, kernel_size=[3, 3])
    down_4 = ConvUpscaleBlock(down_4, n_filters=21, kernel_size=[3, 3], scale=2)

    down_3 = GlobalConvBlock(res[2], n_filters=21, size=3)
    down_3 = BoundaryRefinementBlock(down_3, n_filters=21, kernel_size=[3, 3])
    down_3 = tf.add(down_3, down_4)
    down_3 = BoundaryRefinementBlock(down_3, n_filters=21, kernel_size=[3, 3])
    down_3 = ConvUpscaleBlock(down_3, n_filters=21, kernel_size=[3, 3], scale=2)

    down_2 = GlobalConvBlock(res[3], n_filters=21, size=3)
    down_2 = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    down_2 = tf.add(down_2, down_3)
    down_2 = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    down_2 = ConvUpscaleBlock(down_2, n_filters=21, kernel_size=[3, 3], scale=2)

    net = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    net = ConvUpscaleBlock(net, n_filters=21, kernel_size=[3, 3], scale=2)
    net = BoundaryRefinementBlock(net, n_filters=21, kernel_size=[3, 3])

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net, init_fn
def build_refinenet(inputs, num_classes, preset_model='RefineNet-Res101', weight_decay=1e-5, is_training=True, upscaling_method="bilinear", pretrained_dir="models"):
    """
    Builds the RefineNet model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes

    Returns:
      RefineNet model
    """

    inputs = mean_image_subtraction(inputs)

    if preset_model == 'RefineNet-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'RefineNet-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'RefineNet-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
            # RefineNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))

    f = [end_points['pool5'], end_points['pool4'], end_points['pool3'], end_points['pool2']]

    g = [None, None, None, None]
    h = [None, None, None, None]

    # Reduce all backbone feature maps to a depth of 256 with 1x1 convolutions
    for i in range(4):
        h[i] = slim.conv2d(f[i], 256, 1)

    g[0] = RefineBlock(high_inputs=None, low_inputs=h[0])
    g[1] = RefineBlock(g[0], h[1])
    g[2] = RefineBlock(g[1], h[2])
    g[3] = RefineBlock(g[2], h[3])

    # g[3] = Upsampling(g[3], scale=4)
    net = g[3]

    if upscaling_method.lower() == "conv":
        net = ConvUpscaleBlock(net, 256, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 256)
        net = ConvUpscaleBlock(net, 128, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 128)
        net = ConvUpscaleBlock(net, 64, kernel_size=[3, 3], scale=2)
        net = ConvBlock(net, 64)
    elif upscaling_method.lower() == "bilinear":
        net = Upsampling(net, scale=4)

    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net, init_fn