Example #1
0
    def __init__(self, num_classes, config, debug=False, seed=None,
                 name='rcnn'):
        """R-CNN classifier head.

        Reads from `config` everything needed to later build the fully
        connected layers that classify proposals and refine their
        bounding boxes.
        """
        super(RCNN, self).__init__(name=name)

        self._num_classes = num_classes
        self._config = config
        # When True, the module returns more detailed Tensors, useful
        # for debugging.
        self._debug = debug
        self._seed = seed

        # Sizes of the fully connected layers applied before the
        # classification and bounding-box adjustment outputs.
        self._layer_sizes = config.layer_sizes
        self._activation = get_activation_function(config.activation_function)
        self._dropout_keep_prob = config.dropout_keep_prob
        self._use_mean = config.use_mean
        self._variances = config.target_normalization_variances
        self._l1_sigma = config.l1_sigma

        # Weight initializers for the shared, classification and
        # bounding-box layers respectively.
        self._rcnn_initializer = get_initializer(config.rcnn_initializer,
                                                 seed=seed)
        self._cls_initializer = get_initializer(config.cls_initializer,
                                                seed=seed)
        self._bbox_initializer = get_initializer(config.bbox_initializer,
                                                 seed=seed)

        self.regularizer = tf.contrib.layers.l2_regularizer(
            scale=config.l2_regularization_scale)
Example #2
0
    def __init__(self, num_classes, config, debug=False, seed=None,
                 name='rcnn'):
        """R-CNN classifier head.

        Caches the configuration required to build the fully connected
        layers that classify proposals and adjust their bounding boxes.
        """
        super(RCNN, self).__init__(name=name)

        self._num_classes = num_classes
        self._config = config
        # Debug mode makes the module return more detailed Tensors which
        # can be useful for debugging.
        self._debug = debug
        self._seed = seed

        # Fully connected layer sizes used before the classification and
        # bounding-box regression outputs.
        self._layer_sizes = config.layer_sizes
        self._activation = get_activation_function(config.activation_function)
        self._dropout_keep_prob = config.dropout_keep_prob
        self._use_mean = config.use_mean
        self._l1_sigma = config.l1_sigma

        # Weight initializers for the shared, classification and
        # bounding-box layers respectively.
        self._rcnn_initializer = get_initializer(config.rcnn_initializer,
                                                 seed=seed)
        self._cls_initializer = get_initializer(config.cls_initializer,
                                                seed=seed)
        self._bbox_initializer = get_initializer(config.bbox_initializer,
                                                 seed=seed)

        self.regularizer = tf.contrib.layers.l2_regularizer(
            scale=config.l2_regularization_scale)
Example #3
0
    def __init__(self, num_anchors, config, debug=False, seed=None, name="rpn"):
        """RPN - Region Proposal Network.

        Given an image (as feature map) and a fixed set of anchors, the RPN
        will learn weights to adjust those anchors so they better look like the
        ground truth objects, as well as scoring them by "objectness" (ie. how
        likely they are to be an object vs background).

        The final result will be a set of rectangular boxes ("proposals"),
        each associated with an objectness score.

        Note: this module can be used independently of Faster R-CNN.
        """
        super(RPN, self).__init__(name=name)

        self._num_anchors = num_anchors
        self._config = config
        self._debug = debug
        self._seed = seed

        # Channel count and kernel shape read straight from the config.
        self._num_channels = config.num_channels
        self._kernel_shape = config.kernel_shape

        self._rpn_initializer = get_initializer(
            config.rpn_initializer, seed=seed
        )
        # According to Faster RCNN paper we need to initialize layers with
        # "from a zero-mean Gaussian distribution with standard deviation 0.01
        self._cls_initializer = get_initializer(
            config.cls_initializer, seed=seed
        )
        self._bbox_initializer = get_initializer(
            config.bbox_initializer, seed=seed
        )
        self._regularizer = tf_slim.regularizers.l2_regularizer(
            scale=config.l2_regularization_scale
        )

        self._l1_sigma = config.l1_sigma

        # A plain ReLU would work equally well here.
        self._rpn_activation = get_activation_function(
            config.activation_function
        )
Example #4
0
    def __init__(self, num_classes, config, debug=False, seed=None, name="rcnn"):
        """R-CNN classifier head.

        Args:
            num_classes: Number of object classes proposals are classified
                into.
            config: Configuration object providing the layer sizes,
                initializers, regularization and loss settings read below.
            debug: When True, the module returns more detailed Tensors which
                can be useful for debugging.
            seed: Seed forwarded to the weight initializers for
                reproducibility.
            name: Name for this module.

        Raises:
            ValueError: If ``config.loss.type`` is not a supported loss type.
        """
        super(RCNN, self).__init__(name=name)
        self._num_classes = num_classes
        # List of the fully connected layer sizes used before classifying and
        # adjusting the bounding box.
        self._layer_sizes = config.layer_sizes
        self._activation = get_activation_function(config.activation_function)
        self._dropout_keep_prob = config.dropout_keep_prob
        self._use_mean = config.use_mean
        self._variances = config.target_normalization_variances

        self._rcnn_initializer = get_initializer(config.rcnn_initializer, seed=seed)
        self._cls_initializer = get_initializer(config.cls_initializer, seed=seed)
        self._bbox_initializer = get_initializer(config.bbox_initializer, seed=seed)
        self.regularizer = tf.contrib.layers.l2_regularizer(
            scale=config.l2_regularization_scale
        )
        self._l1_sigma = config.l1_sigma

        loss_config = config.loss
        if loss_config.type == CROSS_ENTROPY:
            self.loss_type = CROSS_ENTROPY
            # Default the loss weight to 1 when the config omits it.
            if "weight" not in loss_config:
                self.loss_weight = 1
            else:
                self.loss_weight = loss_config.weight
        elif loss_config.type == FOCAL:
            self.loss_type = FOCAL
            # NOTE(review): may be None if "focal_gamma" is missing from the
            # config — confirm the loss computation handles that.
            self.focal_gamma = loss_config.get("focal_gamma")
        else:
            # Fail fast with a clear message; previously an unsupported loss
            # type left self.loss_type unset, causing a confusing
            # AttributeError on the logging line below.
            raise ValueError(
                "Unsupported classification loss type: {}".format(loss_config.type)
            )
        tf.logging.info("Classification loss type in RCNN is {}".format(self.loss_type))
        # Debug mode makes the module return more detailed Tensors which can be
        # useful for debugging.
        self._debug = debug
        self._config = config
        self._seed = seed
Example #5
0
    def __init__(self, num_anchors, config, debug=False, seed=None,
                 name='rpn'):
        """RPN - Region Proposal Network.

        Given an image (as feature map) and a fixed set of anchors, the RPN
        will learn weights to adjust those anchors so they better look like the
        ground truth objects, as well as scoring them by "objectness" (ie. how
        likely they are to be an object vs background).

        The final result will be a set of rectangular boxes ("proposals"),
        each associated with an objectness score.

        Note: this module can be used independently of Faster R-CNN.
        """
        super(RPN, self).__init__(name=name)

        self._num_anchors = num_anchors
        self._config = config
        self._debug = debug
        self._seed = seed

        # Channel count and kernel shape taken directly from the config.
        self._num_channels = config.num_channels
        self._kernel_shape = config.kernel_shape

        self._rpn_initializer = get_initializer(config.rpn_initializer,
                                                seed=seed)
        # According to Faster RCNN paper we need to initialize layers with
        # "from a zero-mean Gaussian distribution with standard deviation 0.01
        self._cls_initializer = get_initializer(config.cls_initializer,
                                                seed=seed)
        self._bbox_initializer = get_initializer(config.bbox_initializer,
                                                 seed=seed)
        self._regularizer = tf.contrib.layers.l2_regularizer(
            scale=config.l2_regularization_scale)

        self._l1_sigma = config.l1_sigma

        # A plain ReLU would work equally well here.
        self._rpn_activation = get_activation_function(
            config.activation_function)
Example #6
0
    def __init__(self,
                 num_anchors,
                 config,
                 debug=False,
                 seed=None,
                 name='rpn'):
        """RPN - Region Proposal Network

        This module works almost independently from the Faster RCNN module.
        It instantiates its own submodules and calculates its own loss,
        and can be used on its own.

        """
        super(RPN, self).__init__(name=name)

        self._num_anchors = num_anchors
        self._config = config
        self._debug = debug
        self._seed = seed

        # Channel count and kernel shape taken directly from the config.
        self._num_channels = config.num_channels
        self._kernel_shape = config.kernel_shape

        self._rpn_initializer = get_initializer(
            config.rpn_initializer, seed=seed
        )
        # According to Faster RCNN paper we need to initialize layers with
        # "from a zero-mean Gaussian distribution with standard deviation 0.01
        self._cls_initializer = get_initializer(
            config.cls_initializer, seed=seed
        )
        self._bbox_initializer = get_initializer(
            config.bbox_initializer, seed=seed
        )
        self._regularizer = tf.contrib.layers.l2_regularizer(
            scale=config.l2_regularization_scale
        )

        # A plain ReLU would work equally well here.
        self._rpn_activation = get_activation_function(
            config.activation_function
        )