Example 1
    def _build_network(self, features, labels, mode):
        """Build a network that returns loss and logits from features and labels."""
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        is_predict = (mode == tf.estimator.ModeKeys.PREDICT)
        steps_per_epoch = float(
            NUM_TRAIN_IMAGES) / self.hparams.train_batch_size
        num_total_steps = int(steps_per_epoch * self.hparams.num_epochs)
        if getattr(self.hparams, 'num_total_steps', None) is None:
            self.hparams.add_hparam('num_total_steps', num_total_steps)
        else:
            self.hparams.set_hparam('num_total_steps', num_total_steps)

        hparams = copy.deepcopy(self.hparams)
        if not is_training:
            hparams.set_hparam('use_aux_head', False)

        tf.logging.info('Amoeba net received hparams for {}:\n{}'.format(
            'training' if is_training else 'eval', formatted_hparams(hparams)))

        logits, end_points = model_builder.build_network(
            features, LABEL_CLASSES, is_training, hparams)

        if not is_predict:
            labels = tf.one_hot(labels, LABEL_CLASSES)
            loss = model_builder.build_softmax_loss(
                logits,
                end_points,
                labels,
                label_smoothing=hparams.label_smoothing,
                add_summary=False)

        # Log the FLOPs per example and the number of trainable parameters.
        if is_training:
            flops = model_builder.compute_flops_per_example(
                hparams.train_batch_size)
        else:
            flops = model_builder.compute_flops_per_example(
                hparams.eval_batch_size)
        tf.logging.info('number of flops: {}'.format(flops))
        self._calc_num_trainable_params()

        if is_predict:
            return None, logits

        return loss, logits
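
For context, the (loss, logits) pair returned by _build_network is the kind of value an Estimator model_fn unpacks. The sketch below is not part of the original example; it reuses the tf alias from the excerpt, and the optimizer choice and the 'learning_rate' entry in params are assumptions made only to illustrate the wiring.

    def model_fn(self, features, labels, mode, params):
        """Hypothetical model_fn wrapper around _build_network (sketch only)."""
        loss, logits = self._build_network(features, labels, mode)

        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {
                'classes': tf.argmax(logits, axis=1),
                'probabilities': tf.nn.softmax(logits),
            }
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

        train_op = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            # Plain momentum SGD stands in for whatever optimizer the real
            # model uses; 'learning_rate' in params is assumed for this sketch.
            optimizer = tf.train.MomentumOptimizer(
                learning_rate=params.get('learning_rate', 0.1), momentum=0.9)
            train_op = optimizer.minimize(
                loss, global_step=tf.train.get_or_create_global_step())

        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

Note that in PREDICT mode _build_network returns None for the loss, which is why the sketch branches on PREDICT before touching it.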
Example 2
    def _module_fn(is_training):
        """A module_fn for use with hub.create_module_spec().

    Args:
      is_training: a boolean, passed to the config.network_fn.
          This is meant to control whether batch norm, dropout etc. are built
          in training or inference mode for this graph version.

    Raises:
      ValueError: if network_fn outputs are not as expected.
    """
        # Set up the module input, and attach an ImageModuleInfo about it.
        with tf.name_scope('hub_input'):
            default_size = (hparams.image_size, ) * 2
            image_module_info = hub.ImageModuleInfo()
            size_info = image_module_info.default_image_size
            size_info.height, size_info.width = default_size
            # TODO(b/72731449): Support variable input size.
            shape = (None, ) + default_size + (3, )
            images = tf.placeholder(dtype=tf.float32,
                                    shape=shape,
                                    name='images')
            hub.attach_image_module_info(image_module_info)
            # The input is expected to have RGB color values in the range [0,1]
            # and gets converted for AmoebaNet to the Inception-style range [-1,+1].
            scaled_images = tf.multiply(images, 2.0)
            scaled_images = tf.subtract(scaled_images, 1.0)

        # Build the net.
        logits, end_points = model_builder.build_network(
            scaled_images, num_classes, is_training, hparams)

        with tf.name_scope('hub_output'):
            # Extract the feature_vectors output.
            try:
                feature_vectors = end_points['global_pool']
            except KeyError:
                tf.logging.error('Valid keys of end_points are: %s',
                                 ', '.join(end_points))
                raise
            with tf.name_scope('feature_vector'):
                if feature_vectors.shape.ndims != 2:
                    raise ValueError(
                        'Wrong rank (expected 2 after squeeze) '
                        'in feature_vectors:', feature_vectors)
            # Extract the logits output (if applicable).
            if num_classes:
                with tf.name_scope('classification'):
                    if logits.shape.ndims != 2:
                        raise ValueError('Wrong rank (expected 2) in logits:',
                                         logits)

        # Add named signatures.
        hub.add_signature('image_feature_vector', dict(images=images),
                          dict(end_points, default=feature_vectors))
        if num_classes:
            hub.add_signature('image_classification', dict(images=images),
                              dict(end_points, default=logits))
        # Add the default signature.
        if num_classes:
            hub.add_signature('default', dict(images=images),
                              dict(default=logits))
        else:
            hub.add_signature('default', dict(images=images),
                              dict(default=feature_vectors))
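
The module_fn above is meant to be handed to hub.create_module_spec(). A minimal sketch of that step follows; it is not part of the original example and assumes _module_fn is still in scope with hparams and num_classes closed over, using the same tf and hub aliases as the excerpt. The is_training flag is supplied per graph version through tags_and_args, and hub.get_expected_image_size() reads back the ImageModuleInfo attached under hub_input.

    # Build one graph version per (tags, kwargs) pair.
    tags_and_args = [
        (set(), {'is_training': False}),      # default tags: inference graph
        ({'train'}, {'is_training': True}),   # 'train' tag: fine-tuning graph
    ]
    spec = hub.create_module_spec(_module_fn, tags_and_args=tags_and_args)

    # Instantiate the inference version and query the feature-vector signature.
    with tf.Graph().as_default():
        module = hub.Module(spec)  # empty tag set selects the inference graph
        height, width = hub.get_expected_image_size(module)
        images = tf.placeholder(tf.float32, shape=(None, height, width, 3))
        feature_vectors = module(images, signature='image_feature_vector')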