Example #1
  def _build(self, inputs, is_training=True, test_local_stats=True):
    """Assembles the `ConvNet2D` and connects it to the graph.

    Args:
      inputs: A 4D Tensor of shape `[batch_size, input_height, input_width,
        input_channels]`.
      is_training: Boolean to indicate to `snt.BatchNorm` if we are
        currently training. By default `True`.
      test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
        normalization should use local batch statistics at test time.
        By default `True`.

    Returns:
      A 4D Tensor of shape `[batch_size, output_height, output_width,
        output_channels[-1]]`.
    """
    self._input_shape = tuple(inputs.get_shape().as_list())
    net = inputs

    final_index = len(self._layers) - 1
    for i, layer in enumerate(self._layers):
      net = layer(net)

      if i != final_index or self._activate_final:
        if self._use_batch_norm:
          bn = batch_norm.BatchNorm(name="batch_norm_{}".format(i),
                                    **self._batch_norm_config)
          net = bn(net,
                   is_training=is_training,
                   test_local_stats=test_local_stats)

        net = self._activation(net)

    return net
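A minimal usage sketch for context, assuming this `_build` belongs to Sonnet 1.x's `snt.nets.ConvNet2D` running on a TensorFlow 1.x graph; the placeholder shape and constructor arguments below are illustrative, not taken from the example:

import tensorflow as tf
import sonnet as snt

# Hypothetical NHWC image batch.
images = tf.placeholder(tf.float32, shape=[None, 28, 28, 3])

convnet = snt.nets.ConvNet2D(output_channels=[32, 64],
                             kernel_shapes=[3],
                             strides=[1],
                             paddings=[snt.SAME],
                             use_batch_norm=True)

# `_build` is invoked through the module call; `is_training` defaults to True.
train_out = convnet(images)
test_out = convnet(images, is_training=False, test_local_stats=True)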
Example #2
    def _build(self, inputs, is_training=None, test_local_stats=True):
        """Assembles the `ConvNet2D` and connects it to the graph.

        Args:
          inputs: A 4D Tensor of shape `[batch_size, input_height, input_width,
            input_channels]`.
          is_training: Boolean to indicate to `snt.BatchNorm` if we are
            currently training. Must be specified explicitly if
            `use_batch_norm` is `True`.
          test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
            normalization should use local batch statistics at test time.
            By default `True`.

        Returns:
          A 4D Tensor of shape `[batch_size, output_height, output_width,
            output_channels[-1]]`.

        Raises:
          ValueError: If `is_training` is not explicitly specified when using
            batch normalization.
        """
        if self._use_batch_norm and is_training is None:
            raise ValueError("Boolean is_training flag must be explicitly specified " "when using batch normalization.")

        self._input_shape = tuple(inputs.get_shape().as_list())
        net = inputs

        final_index = len(self._layers) - 1
        for i, layer in enumerate(self._layers):
            net = layer(net)

            if i != final_index or self._activate_final:
                if self._use_batch_norm:
                    bn = batch_norm.BatchNorm(
                        name="batch_norm_{}".format(i),
                        **self._batch_norm_config,
                    )
                    net = bn(
                        net,
                        is_training=is_training,
                        test_local_stats=test_local_stats,
                    )

                net = self._activation(net)

        return net
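The only behavioural change from Example #1 is that `is_training` no longer has a default, so callers of a batch-normalized network must pass it explicitly. A short sketch of that calling convention, reusing the hypothetical `convnet` and `images` from the previous sketch:

# A boolean placeholder fed at session.run time; here `is_training` can
# typically be a Python bool or a scalar bool Tensor.
is_training_ph = tf.placeholder(tf.bool, shape=[])

outputs = convnet(images, is_training=is_training_ph)

# Calling `convnet(images)` with `use_batch_norm=True` would instead raise:
#   ValueError: Boolean is_training flag must be explicitly specified
#   when using batch normalization.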
Example #3
def create_batch_norm():
  return batch_norm.BatchNorm(offset=False, scale=False)(
      inputs, is_training, test_local_stats)
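This fragment is a closure: `inputs`, `is_training` and `test_local_stats` are captured from the enclosing scope (typically the `_build` method of a larger module). Inlined, the call reduces to roughly the sketch below; the variable names are assumptions, and `offset=False, scale=False` means the layer only normalizes with the batch (or moving) statistics and learns no beta/gamma parameters:

bn = batch_norm.BatchNorm(offset=False, scale=False)
normalized = bn(inputs, is_training=is_training,
                test_local_stats=test_local_stats)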
Example #4
    def _build(self,
               inputs,
               keep_prob=None,
               is_training=True,
               test_local_stats=True):
        """Connects the AlexNet module into the graph.

    Args:
      inputs: A Tensor of size [batch_size, input_height, input_width,
        input_channels], representing a batch of input images.
      keep_prob: A scalar Tensor representing the dropout keep probability.
      is_training: Boolean to indicate to `snt.BatchNorm` if we are
        currently training. By default `True`.
      test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
        normalization should  use local batch statistics at test time.
        By default `True`.

    Returns:
      A Tensor of size [batch_size, output_size], where `output_size` depends
      on the mode the network was constructed in.

    Raises:
      base.IncompatibleShapeError: If any of the input image dimensions
        (input_height, input_width) are too small for the given network mode.
    """

        input_shape = inputs.get_shape().as_list()

        if input_shape[1] < self._min_size or input_shape[2] < self._min_size:
            raise base.IncompatibleShapeError(
                "Image shape too small: ({:d}, {:d}) < {:d}".format(
                    input_shape[1], input_shape[2], self._min_size))

        net = inputs

        for i, params in enumerate(self._conv_layers):
            output_channels, conv_params, max_pooling = params

            kernel_size, stride = conv_params

            conv_mod = conv.Conv2D(name="conv_{}".format(i),
                                   output_channels=output_channels,
                                   kernel_shape=kernel_size,
                                   stride=stride,
                                   padding=conv.VALID,
                                   initializers=self._initializers,
                                   partitioners=self._partitioners,
                                   regularizers=self._regularizers)

            if not self.is_connected:
                self._conv_modules.append(conv_mod)

            net = conv_mod(net)

            if self._use_batch_norm:
                bn = batch_norm.BatchNorm(**self._batch_norm_config)
                net = bn(net, is_training, test_local_stats)

            net = tf.nn.relu(net)

            if max_pooling is not None:
                pooling_kernel_size, pooling_stride = max_pooling
                net = tf.nn.max_pool(
                    net,
                    ksize=[1, pooling_kernel_size, pooling_kernel_size, 1],
                    strides=[1, pooling_stride, pooling_stride, 1],
                    padding=conv.VALID)

        net = basic.BatchFlatten(name="flatten")(net)

        for i, output_size in enumerate(self._fc_layers):
            linear_mod = basic.Linear(name="fc_{}".format(i),
                                      output_size=output_size,
                                      partitioners=self._partitioners)

            if not self.is_connected:
                self._linear_modules.append(linear_mod)

            net = linear_mod(net)

            if self._use_batch_norm:
                bn = batch_norm.BatchNorm(**self._batch_norm_config)
                net = bn(net, is_training, test_local_stats)

            net = tf.nn.relu(net)

            if keep_prob is not None:
                net = tf.nn.dropout(net, keep_prob=keep_prob)

        return net
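A hedged end-to-end sketch of connecting such an AlexNet-style module, assuming a Sonnet 1.x `snt.nets.AlexNetMini` wrapper around this `_build`; the class name, input size and constructor arguments are assumptions for illustration:

import tensorflow as tf
import sonnet as snt

model = snt.nets.AlexNetMini(use_batch_norm=True)

images = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])

# `is_training` defaults to True in this variant; dropout is only applied
# when `keep_prob` is provided.
train_logits = model(images, keep_prob=0.5, is_training=True)
eval_logits = model(images, is_training=False)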
Example #5
    def _build(self,
               inputs,
               keep_prob=None,
               is_training=None,
               test_local_stats=True):
        """Connects the AlexNet module into the graph.

    The is_training flag only controls the batch norm settings, if `False` it
    does not force no dropout by overriding any input `keep_prob`. To avoid any
    confusion this may cause, if `is_training=False` and `keep_prob` would cause
    dropout to be applied, an error is thrown.

    Args:
      inputs: A Tensor of size [batch_size, input_height, input_width,
        input_channels], representing a batch of input images.
      keep_prob: A scalar Tensor representing the dropout keep probability.
        When `is_training=False` this must be None or 1 to give no dropout.
      is_training: Boolean to indicate if we are currently training. Must be
          specified if batch normalization or dropout is used.
      test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
        normalization should  use local batch statistics at test time.
        By default `True`.

    Returns:
      A Tensor of size [batch_size, output_size], where `output_size` depends
      on the mode the network was constructed in.

    Raises:
      base.IncompatibleShapeError: If any of the input image dimensions
        (input_height, input_width) are too small for the given network mode.
      ValueError: If `keep_prob` is not None or 1 when `is_training=False`.
      ValueError: If `is_training` is not explicitly specified when using
        batch normalization.
    """
        # `is_training` must be given when batch norm or dropout is in use.
        if (self._use_batch_norm
                or keep_prob is not None) and is_training is None:
            raise ValueError(
                "Boolean is_training flag must be explicitly specified "
                "when using batch normalization or dropout.")

        # Check input shape.
        input_shape = inputs.get_shape().as_list()
        if input_shape[1] < self._min_size or input_shape[2] < self._min_size:
            raise base.IncompatibleShapeError(
                "Image shape too small: ({:d}, {:d}) < {:d}".format(
                    input_shape[1], input_shape[2], self._min_size))

        net = inputs

        # Check keep prob
        if keep_prob is not None:
            valid_inputs = tf.logical_or(is_training, tf.equal(keep_prob, 1.))
            keep_prob_check = tf.assert_equal(
                valid_inputs, True,
                message="Input `keep_prob` must be None or 1 if "
                        "`is_training=False`.")
            with tf.control_dependencies([keep_prob_check]):
                net = tf.identity(net)

        for i, params in enumerate(self._conv_layers):
            output_channels, conv_params, max_pooling = params

            kernel_size, stride = conv_params

            conv_mod = conv.Conv2D(name="conv_{}".format(i),
                                   output_channels=output_channels,
                                   kernel_shape=kernel_size,
                                   stride=stride,
                                   padding=conv.VALID,
                                   initializers=self._initializers,
                                   partitioners=self._partitioners,
                                   regularizers=self._regularizers)

            if not self.is_connected:
                self._conv_modules.append(conv_mod)

            net = conv_mod(net)

            if self._use_batch_norm:
                bn = batch_norm.BatchNorm(**self._batch_norm_config)
                net = bn(net, is_training, test_local_stats)

            net = tf.nn.relu(net)

            if max_pooling is not None:
                pooling_kernel_size, pooling_stride = max_pooling
                net = tf.nn.max_pool(
                    net,
                    ksize=[1, pooling_kernel_size, pooling_kernel_size, 1],
                    strides=[1, pooling_stride, pooling_stride, 1],
                    padding=conv.VALID)

        net = basic.BatchFlatten(name="flatten")(net)

        for i, output_size in enumerate(self._fc_layers):
            linear_mod = basic.Linear(name="fc_{}".format(i),
                                      output_size=output_size,
                                      initializers=self._initializers,
                                      partitioners=self._partitioners)

            if not self.is_connected:
                self._linear_modules.append(linear_mod)

            net = linear_mod(net)

            if self._use_batch_norm and self._bn_on_fc_layers:
                bn = batch_norm.BatchNorm(**self._batch_norm_config)
                net = bn(net, is_training, test_local_stats)

            net = tf.nn.relu(net)

            if keep_prob is not None:
                net = tf.nn.dropout(net, keep_prob=keep_prob)

        return net
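The extra checks in this variant surface misuse of `keep_prob` and `is_training` at graph construction and at graph execution rather than silently proceeding. A sketch of the resulting calling convention, reusing the hypothetical `model` and `images` from the previous sketch and assuming they now wrap this stricter `_build`:

# Dropout or batch norm without an explicit is_training flag fails at
# graph construction:
#   model(images, keep_prob=0.5)  ->  ValueError
# Dropout requested while not training trips the tf.assert_equal guard
# (keep_prob_check) instead of being silently ignored:
#   model(images, keep_prob=0.5, is_training=False)

# Valid evaluation-time call: keep_prob omitted, or set to 1 for no dropout.
eval_logits = model(images, keep_prob=1.0, is_training=False)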