Example #1
    def _network(self):
        # There shouldn't be any gt_boxes if in evaluation mode
        if self.eval_mode is True:
            assert self.gt_boxes is None, \
                'Evaluation mode should not have ground truth boxes (or else what are you detecting for?)'

        _num_anchors = len(self.anchor_scales)*3

        rpn_layers = Layers(self.featureMaps)

        with tf.variable_scope('rpn'):
            # Spatial windowing
            for i in range(len(cfg.RPN_OUTPUT_CHANNELS)):
                rpn_layers.conv2d(filter_size=cfg.RPN_FILTER_SIZES[i], output_channels=cfg.RPN_OUTPUT_CHANNELS[i])
                
            features = rpn_layers.get_output()

            with tf.variable_scope('cls'):
                # Box-classification layer (objectness)
                self.rpn_bbox_cls_layers = Layers(features)
                self.rpn_bbox_cls_layers.conv2d(filter_size=1, output_channels=_num_anchors*2, activation_fn=None)

            with tf.variable_scope('target'):
                # Only calculate targets in train mode. No ground truth boxes in evaluation mode
                if self.eval_mode is False:
                    # Anchor Target Layer (anchors and deltas)
                    rpn_cls_score = self.rpn_bbox_cls_layers.get_output()
                    self.rpn_labels, self.rpn_bbox_targets, self.rpn_bbox_inside_weights, self.rpn_bbox_outside_weights = \
                        anchor_target_layer(rpn_cls_score=rpn_cls_score, gt_boxes=self.gt_boxes, im_dims=self.im_dims,
                                            _feat_stride=self._feat_stride, anchor_scales=self.anchor_scales)

            with tf.variable_scope('bbox'):
                # Bounding-Box regression layer (bounding box predictions)
                self.rpn_bbox_pred_layers = Layers(features)
                self.rpn_bbox_pred_layers.conv2d(filter_size=1, output_channels=_num_anchors*4, activation_fn=None)
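
In the code above, `_num_anchors = len(self.anchor_scales)*3` assumes three aspect ratios per anchor scale, the standard Faster R-CNN layout, and that count is what forces the `cls` head to emit `_num_anchors*2` channels and the `bbox` head `_num_anchors*4`. A minimal NumPy sketch of that bookkeeping; the concrete scales, ratios and stride below are illustrative assumptions, not values read from this repository's `cfg`:

import numpy as np

# Illustrative values only; the real scales come from cfg.RPN_ANCHOR_SCALES.
anchor_scales = [8, 16, 32]       # multiples of the feature stride
aspect_ratios = [0.5, 1.0, 2.0]   # the usual h/w triplet in Faster R-CNN
feat_stride = 16                  # pixels per feature-map cell (assumed)

num_anchors = len(anchor_scales) * len(aspect_ratios)   # 3 * 3 = 9
cls_channels = num_anchors * 2    # object vs. background score per anchor
bbox_channels = num_anchors * 4   # (dx, dy, dw, dh) per anchor

# Build the reference anchors centred at the origin.
anchors = []
for scale in anchor_scales:
    for ratio in aspect_ratios:
        area = (scale * feat_stride) ** 2
        w = np.sqrt(area / ratio)
        h = w * ratio
        anchors.append([-w / 2.0, -h / 2.0, w / 2.0, h / 2.0])
anchors = np.array(anchors)

print(num_anchors, cls_channels, bbox_channels)   # 9 18 36
print(anchors.shape)                              # (9, 4)
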
Example #2
    def _conv_layers(self,x):
        conv_layers = Layers(x)
        
        # Convolutional layers
        res_blocks = [1,3,4,23,3]
        output_channels = [64,256,512,1024,2048]
        
        with tf.variable_scope('scale0'):
            conv_layers.conv2d(filter_size=7,output_channels=output_channels[0],stride=2,padding='SAME',b_value=None)
            conv_layers.maxpool(k=3)
        with tf.variable_scope('scale1'):
            conv_layers.res_layer(filter_size=3, output_channels=output_channels[1], stride=2)
            for block in range(res_blocks[1]-1):
                conv_layers.res_layer(filter_size=3, output_channels=output_channels[1], stride=1)
        with tf.variable_scope('scale2'):
            conv_layers.res_layer(filter_size=3, output_channels=output_channels[2], stride=2)
            for block in range(res_blocks[2]-1):
                conv_layers.res_layer(filter_size=3, output_channels=output_channels[2], stride=1)
        with tf.variable_scope('scale3'):
            conv_layers.res_layer(filter_size=3, output_channels=output_channels[3], stride=2)
            for block in range(res_blocks[3]-1):
                conv_layers.res_layer(filter_size=3, output_channels=output_channels[3], stride=1)
        with tf.variable_scope('scale4'):
            conv_layers.res_layer(filter_size=3, output_channels=output_channels[4], stride=2)
            for block in range(res_blocks[4]-1):
                conv_layers.res_layer(filter_size=3, output_channels=output_channels[4], stride=1)
        
        conv_layers.avgpool(globe=True)
        
        # Fully Connected Layer
        conv_layers.fc(output_nodes=10)

        return conv_layers.get_output()
    def _conv_layers(self, x):
        conv_layers = Layers(x)

        # Convolutional layers
        res_blocks = [1, 3, 4, 23, 3]
        output_channels = [64, 256, 512, 1024, 2048]

        with tf.variable_scope('scale0'):
            conv_layers.conv2d(filter_size=7,
                               output_channels=output_channels[0],
                               stride=2,
                               padding='SAME',
                               b_value=None)
            conv_layers.maxpool(k=3)
        with tf.variable_scope('scale1'):
            conv_layers.res_layer(filter_size=3,
                                  output_channels=output_channels[1],
                                  stride=2)
            for block in range(res_blocks[1] - 1):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[1],
                                      stride=1)
        with tf.variable_scope('scale2'):
            conv_layers.res_layer(filter_size=3,
                                  output_channels=output_channels[2],
                                  stride=2)
            for block in range(res_blocks[2] - 1):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[2],
                                      stride=1)
        with tf.variable_scope('scale3'):
            conv_layers.res_layer(filter_size=3,
                                  output_channels=output_channels[3],
                                  stride=2)
            for block in range(res_blocks[3] - 1):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[3],
                                      stride=1)
        with tf.variable_scope('scale4'):
            conv_layers.res_layer(filter_size=3,
                                  output_channels=output_channels[4],
                                  stride=2)
            for block in range(res_blocks[4] - 1):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[4],
                                      stride=1)

        conv_layers.avgpool(globe=True)

        # Fully Connected Layer
        conv_layers.fc(output_nodes=10)

        return conv_layers.get_output()
Example #4
    def _network(self, x):
        conv_layers = Layers(x)

        # Convolutional layers
        with tf.variable_scope('resnet101'):
            res_blocks = [1, 3, 4, 23, 3]
            output_channels = [64, 256, 512, 1024, 2048]

            with tf.variable_scope('scale0'):
                conv_layers.conv2d(filter_size=7,
                                   output_channels=output_channels[0],
                                   stride=2,
                                   padding='SAME',
                                   b_value=None)
                conv_layers.maxpool(k=3)
            with tf.variable_scope('scale1'):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[1],
                                      stride=2)
                for block in range(res_blocks[1] - 1):
                    conv_layers.res_layer(filter_size=3,
                                          output_channels=output_channels[1],
                                          stride=1)
            with tf.variable_scope('scale2'):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[2],
                                      stride=2)
                for block in range(res_blocks[2] - 1):
                    conv_layers.res_layer(filter_size=3,
                                          output_channels=output_channels[2],
                                          stride=1)
            with tf.variable_scope('scale3'):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[3],
                                      stride=2)
                for block in range(res_blocks[3] - 1):
                    conv_layers.res_layer(filter_size=3,
                                          output_channels=output_channels[3],
                                          stride=1)
            with tf.variable_scope('scale4'):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[4],
                                      stride=2)
                for block in range(res_blocks[4] - 1):
                    conv_layers.res_layer(filter_size=3,
                                          output_channels=output_channels[4],
                                          stride=1)

        return conv_layers
class rpn:
    '''
    Region Proposal Network (RPN): Takes convolutional feature maps (TensorBase 
    Layers object) from the last layer and proposes bounding boxes for objects.
    '''
    def __init__(self, featureMaps, gt_boxes, im_dims, flags):
        self.featureMaps = featureMaps
        self.gt_boxes = gt_boxes
        self.im_dims = im_dims
        self.flags = flags
        self._network()
        
    def _network(self):
        _num_anchors = len(self.flags['anchor_scales']) * 3

        rpn_layers = Layers(self.featureMaps)

        with tf.variable_scope('rpn'):
            # Spatial windowing
            rpn_layers.conv2d(filter_size=3, output_channels=512)
            features = rpn_layers.get_output()

            # Box-classification layer (objectness)
            self.rpn_bbox_cls_layers = Layers(features)
            self.rpn_bbox_cls_layers.conv2d(filter_size=1, output_channels=_num_anchors * 2, activation_fn=None)

            # Anchor Target Layer (anchors and deltas)
            self.rpn_cls_score = self.rpn_bbox_cls_layers.get_output()
            self.rpn_labels, self.rpn_bbox_targets, self.rpn_bbox_inside_weights, self.rpn_bbox_outside_weights = \
                anchor_target_layer(rpn_cls_score=self.rpn_cls_score, gt_boxes=self.gt_boxes,
                                    im_dims=self.im_dims, anchor_scales=self.flags['anchor_scales'])

            # Bounding-Box regression layer (bounding box predictions)
            self.rpn_bbox_pred_layers = Layers(features)
            self.rpn_bbox_pred_layers.conv2d(filter_size=1, output_channels=_num_anchors * 4, activation_fn=None)

    def get_rpn_bbox_cls(self):
        return self.rpn_bbox_cls_layers.get_output()
        
    def get_rpn_bbox_pred(self):
        return self.rpn_bbox_pred_layers.get_output()
    
    def get_rpn_labels(self):
        return self.rpn_labels
        
    def get_rpn_bbox_targets(self):
        return self.rpn_bbox_targets
        
    def get_rpn_bbox_inside_weights(self):
        return self.rpn_bbox_inside_weights
        
    def get_rpn_bbox_outside_weights(self):
        return self.rpn_bbox_outside_weights
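
The `rpn_bbox_targets` that `anchor_target_layer` hands back are, in the usual Faster R-CNN parameterization, per-anchor offsets `(dx, dy, dw, dh)` between an anchor and its matched ground-truth box. A self-contained NumPy sketch of that encoding; the helper name and the toy boxes below are made up for illustration and are not the repository's own `anchor_target_layer`:

import numpy as np

def encode_bbox_targets(anchors, gt_boxes):
    """Standard Faster R-CNN box encoding.

    anchors, gt_boxes: (N, 4) arrays of (x1, y1, x2, y2), with anchor i
    matched to gt_boxes[i].  Returns (N, 4) targets (dx, dy, dw, dh).
    """
    aw = anchors[:, 2] - anchors[:, 0] + 1.0
    ah = anchors[:, 3] - anchors[:, 1] + 1.0
    ax = anchors[:, 0] + 0.5 * aw
    ay = anchors[:, 1] + 0.5 * ah

    gw = gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0
    gh = gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0
    gx = gt_boxes[:, 0] + 0.5 * gw
    gy = gt_boxes[:, 1] + 0.5 * gh

    dx = (gx - ax) / aw          # centre shifts, normalised by anchor size
    dy = (gy - ay) / ah
    dw = np.log(gw / aw)         # log-scale width/height changes
    dh = np.log(gh / ah)
    return np.stack([dx, dy, dw, dh], axis=1)

# Toy example: one 16x16 anchor matched to a slightly shifted, larger box.
anchors = np.array([[0.0, 0.0, 15.0, 15.0]])
gt = np.array([[2.0, 4.0, 21.0, 19.0]])
print(encode_bbox_targets(anchors, gt))
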
Example #6
    def _network(self, x):
        conv_layers = Layers(x)

        # Convolutional layers
        scope = 'resnet' + str(self.depth)
        with tf.variable_scope(scope):
            res_blocks = self.architectures[self.depth]
            output_channels = [64, 256, 512, 1024, 2048]

            with tf.variable_scope('scale0'):
                conv_layers.conv2d(filter_size=7,
                                   output_channels=output_channels[0],
                                   stride=2,
                                   padding='SAME',
                                   b_value=None)  # Downsample
                conv_layers.maxpool(k=3, s=2)  # Downsample
            with tf.variable_scope('scale1'):
                for block in range(res_blocks[1]):
                    conv_layers.res_layer(filter_size=3,
                                          output_channels=output_channels[1],
                                          stride=1)
            with tf.variable_scope('scale2'):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[2],
                                      stride=2)  # Downsample
                for block in range(res_blocks[2] - 1):
                    conv_layers.res_layer(filter_size=3,
                                          output_channels=output_channels[2],
                                          stride=1)
            with tf.variable_scope('scale3'):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[3],
                                      stride=2)  # Downsample
                for block in range(res_blocks[3] - 1):
                    conv_layers.res_layer(filter_size=3,
                                          output_channels=output_channels[3],
                                          stride=1)
            with tf.variable_scope('scale4'):
                conv_layers.res_layer(filter_size=3,
                                      output_channels=output_channels[4],
                                      stride=2)  # Downsample
                for block in range(res_blocks[4] - 1):
                    conv_layers.res_layer(filter_size=3,
                                          output_channels=output_channels[4],
                                          stride=1)

        return conv_layers
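
The `# Downsample` comments above mark every point where the spatial resolution is halved: the 7x7 stride-2 convolution, the stride-2 max-pool, and the first residual layer of scales 2 to 4. A short, pure-Python sketch of how those strides accumulate into the overall feature stride; the 224 input size is an assumption for illustration:

# Strides applied in order by the network above.
stage_strides = [2, 2, 1, 2, 2, 2]   # scale0 conv, scale0 maxpool, scale1..scale4

feat_stride = 1
size = 224                            # assumed input resolution
for s in stage_strides:
    feat_stride *= s
    size = (size + s - 1) // s        # roughly what SAME padding gives

print(feat_stride)   # 32 -> one scale4 cell covers a 32x32 pixel region
print(size)          # 7  -> spatial size of the scale4 output for a 224 input
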
Example #7
    def _network(self, x):
        conv_layers = Layers(x)

        # Make sure that number of layers is consistent
        assert len(self.output_channels) == self.depth
        assert len(self.strides) == self.depth

        # Convolutional layers
        scope = 'convnet' + str(self.depth)
        with tf.variable_scope(scope):
            for l in range(self.depth):
                conv_layers.conv2d(filter_size=self.filter_sizes[l],
                                   output_channels=self.output_channels[l],
                                   stride=self.strides[l],
                                   padding='SAME',
                                   b_value=None)

        return conv_layers
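
For a plain stack like this, the per-layer `filter_sizes` and `strides` fully determine the output resolution and receptive field. A quick sketch of that arithmetic; the four-layer configuration below is hypothetical, the real values live in `self.filter_sizes` and `self.strides`:

# Hypothetical configuration in the style of the class above.
filter_sizes = [3, 3, 3, 3]
strides = [1, 2, 2, 2]
input_size = 64

size, rf, jump = input_size, 1, 1
for k, s in zip(filter_sizes, strides):
    size = (size + s - 1) // s    # SAME padding: ceil(size / stride)
    rf = rf + (k - 1) * jump      # receptive field grows by (k - 1) * current jump
    jump = jump * s               # distance (in input pixels) between output cells
    print(f"k={k} s={s} -> {size}x{size} output, {rf}x{rf} receptive field")
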
Example #8
    def _network(self, x):
        conv_layers = Layers(x)

        # Make sure that number of layers is consistent
        assert len(self.output_channels) == self.depth
        assert len(self.strides) == self.depth

        # Convolutional layers
        scope = 'convnet' + str(self.depth)
        with tf.variable_scope(scope):
            for l in range(self.depth):
                conv_layers.conv2d(filter_size=self.filter_sizes[l],
                                   output_channels=self.output_channels[l],
                                   stride=self.strides[l],
                                   padding='SAME',
                                   b_value=None)

        return conv_layers
Example #9
    def _network(self):
        # There shouldn't be any gt_boxes if in evaluation mode
        if self.eval_mode is True:
            assert self.gt_boxes is None, \
                'Evaluation mode should not have ground truth boxes (or else what are you detecting for?)'

        _num_anchors = len(self.anchor_scales) * 1

        rpn_layers = Layers(self.featureMaps)

        with tf.variable_scope('rpn'):
            # Spatial windowing
            for i in range(len(cfg.RPN_OUTPUT_CHANNELS)):
                rpn_layers.conv2d(filter_size=cfg.RPN_FILTER_SIZES[i],
                                  output_channels=cfg.RPN_OUTPUT_CHANNELS[i])

            features = rpn_layers.get_output()

            with tf.variable_scope('cls'):
                # Box-classification layer (objectness)
                self.rpn_bbox_cls_layers = Layers(features)
                self.rpn_bbox_cls_layers.conv2d(filter_size=1,
                                                output_channels=_num_anchors *
                                                2,
                                                activation_fn=None)

            with tf.variable_scope('target'):
                # Only calculate targets in train mode. No ground truth boxes in evaluation mode
                if self.eval_mode is False:
                    print(anchor_target_layer)
                    # Anchor Target Layer (anchors and deltas)
                    rpn_cls_score = self.rpn_bbox_cls_layers.get_output()
                    self.rpn_labels, self.rpn_bbox_targets, self.rpn_bbox_inside_weights, self.rpn_bbox_outside_weights = \
                        anchor_target_layer(rpn_cls_score=rpn_cls_score, gt_boxes=self.gt_boxes, im_dims=self.im_dims,
                                            _feat_stride=self._feat_stride, anchor_scales=self.anchor_scales)

            with tf.variable_scope('bbox'):
                # Bounding-Box regression layer (bounding box predictions)
                self.rpn_bbox_pred_layers = Layers(features)
                self.rpn_bbox_pred_layers.conv2d(filter_size=1,
                                                 output_channels=_num_anchors *
                                                 4,
                                                 activation_fn=None)
    def _network(self):
        _num_anchors = len(self.flags['anchor_scales']) * 3

        rpn_layers = Layers(self.featureMaps)

        with tf.variable_scope('rpn'):
            # Spatial windowing
            rpn_layers.conv2d(filter_size=3, output_channels=512)
            features = rpn_layers.get_output()

            # Box-classification layer (objectness)
            self.rpn_bbox_cls_layers = Layers(features)
            self.rpn_bbox_cls_layers.conv2d(filter_size=1, output_channels=_num_anchors * 2, activation_fn=None)

            # Anchor Target Layer (anchors and deltas)
            self.rpn_cls_score = self.rpn_bbox_cls_layers.get_output()
            self.rpn_labels, self.rpn_bbox_targets, self.rpn_bbox_inside_weights, self.rpn_bbox_outside_weights = \
                anchor_target_layer(rpn_cls_score=self.rpn_cls_score, gt_boxes=self.gt_boxes,
                                    im_dims=self.im_dims, anchor_scales=self.flags['anchor_scales'])

            # Bounding-Box regression layer (bounding box predictions)
            self.rpn_bbox_pred_layers = Layers(features)
            self.rpn_bbox_pred_layers.conv2d(filter_size=1, output_channels=_num_anchors * 4, activation_fn=None)
class rpn:
    '''
    Region Proposal Network (RPN): From the convolutional feature maps 
    (TensorBase Layers object) of the last layer, generate bounding boxes 
    relative to anchor boxes and give an "objectness" score to each.
    '''
    def __init__(self, featureMaps, gt_boxes, im_dims, _feat_stride, flags):
        self.featureMaps = featureMaps
        self.gt_boxes = gt_boxes
        self.im_dims = im_dims
        self._feat_stride = _feat_stride
        self.flags = flags
        self._network()
        
    def _network(self):
        _num_anchors = len(self.flags['anchor_scales']) * 3

        rpn_layers = Layers(self.featureMaps)

        with tf.variable_scope('rpn'):
            # Spatial windowing
            rpn_layers.conv2d(filter_size=3, output_channels=512)
            features = rpn_layers.get_output()

            with tf.variable_scope('cls'):
                # Box-classification layer (objectness)
                self.rpn_bbox_cls_layers = Layers(features)
                self.rpn_bbox_cls_layers.conv2d(filter_size=1, output_channels=_num_anchors * 2, activation_fn=None)

                # Anchor Target Layer (anchors and deltas)
                rpn_cls_score = self.rpn_bbox_cls_layers.get_output()
                self.rpn_labels, self.rpn_bbox_targets, self.rpn_bbox_inside_weights, self.rpn_bbox_outside_weights = \
                    anchor_target_layer(rpn_cls_score=rpn_cls_score, gt_boxes=self.gt_boxes, im_dims=self.im_dims,
                                        _feat_stride=self._feat_stride, anchor_scales=self.flags['anchor_scales'])

            with tf.variable_scope('bbox'):
                # Bounding-Box regression layer (bounding box predictions)
                self.rpn_bbox_pred_layers = Layers(features)
                self.rpn_bbox_pred_layers.conv2d(filter_size=1, output_channels=_num_anchors * 4, activation_fn=None)

    # Get functions
    def get_rpn_cls_score(self):
        return self.rpn_bbox_cls_layers.get_output()

    def get_rpn_labels(self):
        return self.rpn_labels
        
    def get_rpn_bbox_pred(self):
        return self.rpn_bbox_pred_layers.get_output()
        
    def get_rpn_bbox_targets(self):
        return self.rpn_bbox_targets
        
    def get_rpn_bbox_inside_weights(self):
        return self.rpn_bbox_inside_weights
        
    def get_rpn_bbox_outside_weights(self):
        return self.rpn_bbox_outside_weights
    
    # Loss functions
    def get_rpn_cls_loss(self):
        rpn_cls_score = self.get_rpn_cls_score()
        rpn_labels = self.get_rpn_labels()
        return rpn_cls_loss(rpn_cls_score,rpn_labels)
    
    def get_rpn_bbox_loss(self):
        rpn_bbox_pred = self.get_rpn_bbox_pred()
        rpn_bbox_targets = self.get_rpn_bbox_targets()
        rpn_bbox_inside_weights = self.get_rpn_bbox_inside_weights()
        rpn_bbox_outside_weights = self.get_rpn_bbox_outside_weights()
        return rpn_bbox_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights)
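
`get_rpn_bbox_loss` forwards the predictions, targets and the two weight tensors to `rpn_bbox_loss`. In the standard Faster R-CNN formulation that is a smooth-L1 loss in which the inside weights zero out anchors that were not matched to a ground-truth box and the outside weights carry the normalization. A NumPy sketch under those assumptions; it is not the repository's own loss module:

import numpy as np

def smooth_l1(x, sigma=3.0):
    """Piecewise loss from Fast/Faster R-CNN: quadratic near zero, linear beyond."""
    s2 = sigma ** 2
    ax = np.abs(x)
    return np.where(ax < 1.0 / s2, 0.5 * s2 * x ** 2, ax - 0.5 / s2)

def rpn_bbox_loss_np(bbox_pred, bbox_targets, inside_w, outside_w):
    diff = inside_w * (bbox_pred - bbox_targets)   # mask out unmatched anchors
    return np.sum(outside_w * smooth_l1(diff))     # outside_w normalises the sum

# Toy tensors: 2 anchors x 4 coordinates, both anchors counted as positives.
pred = np.array([[0.1, 0.2, 0.0, -0.1], [0.5, 0.5, 0.5, 0.5]])
targets = np.array([[0.0, 0.0, 0.0, 0.0], [0.4, 0.6, 0.4, 0.6]])
inside = np.ones((2, 4))
outside = np.full((2, 4), 1.0 / 2)                 # 1 / number of sampled anchors
print(rpn_bbox_loss_np(pred, targets, inside, outside))
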
Example #12
class rpn:
    '''
    Region Proposal Network (RPN): From the convolutional feature maps
    (TensorBase Layers object) of the last layer, generate bounding boxes
    relative to anchor boxes and give an "objectness" score to each.

    In evaluation mode (eval_mode==True), gt_boxes should be None.
    '''
    def __init__(self, featureMaps, gt_boxes, im_dims, _feat_stride,
                 eval_mode):
        self.featureMaps = featureMaps
        self.gt_boxes = gt_boxes
        self.im_dims = im_dims
        self._feat_stride = _feat_stride
        self.anchor_scales = cfg.RPN_ANCHOR_SCALES
        self.eval_mode = eval_mode

        self._network()

    def _network(self):
        # There shouldn't be any gt_boxes if in evaluation mode
        if self.eval_mode is True:
            assert self.gt_boxes is None, \
                'Evaluation mode should not have ground truth boxes (or else what are you detecting for?)'

        _num_anchors = len(self.anchor_scales) * 3

        rpn_layers = Layers(self.featureMaps)

        with tf.variable_scope('rpn'):
            # Spatial windowing
            for i in range(len(cfg.RPN_OUTPUT_CHANNELS)):
                rpn_layers.conv2d(filter_size=cfg.RPN_FILTER_SIZES[i],
                                  output_channels=cfg.RPN_OUTPUT_CHANNELS[i])

            features = rpn_layers.get_output()

            with tf.variable_scope('cls'):
                # Box-classification layer (objectness)
                self.rpn_bbox_cls_layers = Layers(features)
                self.rpn_bbox_cls_layers.conv2d(filter_size=1,
                                                output_channels=_num_anchors *
                                                2,
                                                activation_fn=None)

            with tf.variable_scope('target'):
                # Only calculate targets in train mode. No ground truth boxes in evaluation mode
                if self.eval_mode is False:
                    # Anchor Target Layer (anchors and deltas)
                    rpn_cls_score = self.rpn_bbox_cls_layers.get_output()
                    self.rpn_labels, self.rpn_bbox_targets, self.rpn_bbox_inside_weights, self.rpn_bbox_outside_weights = \
                        anchor_target_layer(rpn_cls_score=rpn_cls_score, gt_boxes=self.gt_boxes, im_dims=self.im_dims,
                                            _feat_stride=self._feat_stride, anchor_scales=self.anchor_scales)

            with tf.variable_scope('bbox'):
                # Bounding-Box regression layer (bounding box predictions)
                self.rpn_bbox_pred_layers = Layers(features)
                self.rpn_bbox_pred_layers.conv2d(filter_size=1,
                                                 output_channels=_num_anchors *
                                                 4,
                                                 activation_fn=None)

    # Get functions
    def get_rpn_cls_score(self):
        return self.rpn_bbox_cls_layers.get_output()

    def get_rpn_labels(self):
        assert self.eval_mode is False, 'No RPN labels without ground truth boxes'
        return self.rpn_labels

    def get_rpn_bbox_pred(self):
        return self.rpn_bbox_pred_layers.get_output()

    def get_rpn_bbox_targets(self):
        assert self.eval_mode is False, 'No RPN bounding box targets without ground truth boxes'
        return self.rpn_bbox_targets

    def get_rpn_bbox_inside_weights(self):
        assert self.eval_mode is False, 'No RPN inside weights without ground truth boxes'
        return self.rpn_bbox_inside_weights

    def get_rpn_bbox_outside_weights(self):
        assert self.eval_mode is False, 'No RPN outside weights without ground truth boxes'
        return self.rpn_bbox_outside_weights

    # Loss functions
    def get_rpn_cls_loss(self):
        assert self.eval_mode is False, 'No RPN cls loss without ground truth boxes'
        rpn_cls_score = self.get_rpn_cls_score()
        rpn_labels = self.get_rpn_labels()
        return rpn_cls_loss(rpn_cls_score, rpn_labels)

    def get_rpn_bbox_loss(self):
        assert self.eval_mode is False, 'No RPN bbox loss without ground truth boxes'
        rpn_bbox_pred = self.get_rpn_bbox_pred()
        rpn_bbox_targets = self.get_rpn_bbox_targets()
        rpn_bbox_inside_weights = self.get_rpn_bbox_inside_weights()
        rpn_bbox_outside_weights = self.get_rpn_bbox_outside_weights()
        return rpn_bbox_loss(rpn_bbox_pred, rpn_bbox_targets,
                             rpn_bbox_inside_weights, rpn_bbox_outside_weights)
Example #13
class rpn:
    '''
    Region Proposal Network (RPN): From the convolutional feature maps
    (TensorBase Layers object) of the last layer, generate bounding boxes
    relative to anchor boxes and give an "objectness" score to each.

    In evaluation mode (eval_mode==True), gt_boxes should be None.
    '''

    def __init__(self, featureMaps, gt_boxes, im_dims, _feat_stride, eval_mode):
        self.featureMaps = featureMaps
        self.gt_boxes = gt_boxes
        self.im_dims = im_dims
        self._feat_stride = _feat_stride
        self.anchor_scales = cfg.RPN_ANCHOR_SCALES
        self.eval_mode = eval_mode
        
        self._network()

    def _network(self):
        # There shouldn't be any gt_boxes if in evaluation mode
        if self.eval_mode is True:
            assert self.gt_boxes is None, \
                'Evaluation mode should not have ground truth boxes (or else what are you detecting for?)'

        _num_anchors = len(self.anchor_scales)*3

        rpn_layers = Layers(self.featureMaps)

        with tf.variable_scope('rpn'):
            # Spatial windowing
            for i in range(len(cfg.RPN_OUTPUT_CHANNELS)):
                rpn_layers.conv2d(filter_size=cfg.RPN_FILTER_SIZES[i], output_channels=cfg.RPN_OUTPUT_CHANNELS[i])
                
            features = rpn_layers.get_output()

            with tf.variable_scope('cls'):
                # Box-classification layer (objectness)
                self.rpn_bbox_cls_layers = Layers(features)
                self.rpn_bbox_cls_layers.conv2d(filter_size=1, output_channels=_num_anchors*2, activation_fn=None)

            with tf.variable_scope('target'):
                # Only calculate targets in train mode. No ground truth boxes in evaluation mode
                if self.eval_mode is False:
                    # Anchor Target Layer (anchors and deltas)
                    rpn_cls_score = self.rpn_bbox_cls_layers.get_output()
                    self.rpn_labels, self.rpn_bbox_targets, self.rpn_bbox_inside_weights, self.rpn_bbox_outside_weights = \
                        anchor_target_layer(rpn_cls_score=rpn_cls_score, gt_boxes=self.gt_boxes, im_dims=self.im_dims,
                                            _feat_stride=self._feat_stride, anchor_scales=self.anchor_scales)

            with tf.variable_scope('bbox'):
                # Bounding-Box regression layer (bounding box predictions)
                self.rpn_bbox_pred_layers = Layers(features)
                self.rpn_bbox_pred_layers.conv2d(filter_size=1, output_channels=_num_anchors*4, activation_fn=None)

    # Get functions
    def get_rpn_cls_score(self):
        return self.rpn_bbox_cls_layers.get_output()

    def get_rpn_labels(self):
        assert self.eval_mode is False, 'No RPN labels without ground truth boxes'
        return self.rpn_labels

    def get_rpn_bbox_pred(self):
        return self.rpn_bbox_pred_layers.get_output()

    def get_rpn_bbox_targets(self):
        assert self.eval_mode is False, 'No RPN bounding box targets without ground truth boxes'
        return self.rpn_bbox_targets

    def get_rpn_bbox_inside_weights(self):
        assert self.eval_mode is False, 'No RPN inside weights without ground truth boxes'
        return self.rpn_bbox_inside_weights

    def get_rpn_bbox_outside_weights(self):
        assert self.eval_mode is False, 'No RPN outside weights without ground truth boxes'
        return self.rpn_bbox_outside_weights

    # Loss functions
    def get_rpn_cls_loss(self):
        assert self.eval_mode is False, 'No RPN cls loss without ground truth boxes'
        rpn_cls_score = self.get_rpn_cls_score()
        rpn_labels = self.get_rpn_labels()
        return rpn_cls_loss(rpn_cls_score, rpn_labels)

    def get_rpn_bbox_loss(self):
        assert self.eval_mode is False, 'No RPN bbox loss without ground truth boxes'
        rpn_bbox_pred = self.get_rpn_bbox_pred()
        rpn_bbox_targets = self.get_rpn_bbox_targets()
        rpn_bbox_inside_weights = self.get_rpn_bbox_inside_weights()
        rpn_bbox_outside_weights = self.get_rpn_bbox_outside_weights()
        return rpn_bbox_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights)
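
`rpn_cls_loss` pairs the `_num_anchors*2` objectness scores with the labels produced by `anchor_target_layer`; in the usual Faster R-CNN convention those labels are 1 (object), 0 (background) or -1 (anchor excluded from the loss). A NumPy sketch of a softmax cross-entropy that honours that convention; the -1 handling is an assumption from the original paper, not read from this repository's loss code:

import numpy as np

def rpn_cls_loss_np(scores, labels):
    """scores: (N, 2) objectness logits per anchor; labels: (N,) in {-1, 0, 1}."""
    keep = labels != -1                     # drop anchors marked "don't care"
    scores, labels = scores[keep], labels[keep]

    # Numerically stable log-softmax followed by negative log-likelihood.
    shifted = scores - scores.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    return -np.mean(log_probs[np.arange(len(labels)), labels])

scores = np.array([[2.0, -1.0], [0.5, 0.5], [-1.0, 3.0]])
labels = np.array([0, -1, 1])               # the middle anchor is ignored
print(rpn_cls_loss_np(scores, labels))
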