示例#1
0
    def __init__(self, hparams=None):
        """Builds the classifier: a :class:`Conv1DEncoder` for feature
        extraction and, when ``num_classes > 0``, an extra Dense logit
        layer appended to the encoder.
        """
        ClassifierBase.__init__(self, hparams)

        with tf.variable_scope(self.variable_scope):
            # Construct the underlying encoder from the subset of
            # hparams that Conv1DEncoder understands.
            enc_hparams = utils.dict_fetch(
                hparams, Conv1DEncoder.default_hparams())
            self._encoder = Conv1DEncoder(hparams=enc_hparams)

            self._num_classes = self._hparams.num_classes
            if self._num_classes <= 0:
                # No extra logit layer: the encoder's own final dense
                # layer is assumed to produce the class scores.
                return

            # If the encoder has no trailing dense layers, flatten its
            # output before the logit layer.
            if self._hparams.num_dense_layers <= 0:
                self._encoder.append_layer({"type": "Flatten"})

            # Normalize "logit_layer_kwargs" into a plain dict
            # (hparams machinery wraps user dicts in HParams).
            kwargs = self._hparams.logit_layer_kwargs
            if kwargs is None:
                kwargs = {}
            elif isinstance(kwargs, HParams):
                kwargs = kwargs.todict()
            else:
                raise ValueError(
                    "hparams['logit_layer_kwargs'] must be a dict.")
            # "units" is always forced to the number of classes.
            kwargs.update({"units": self._num_classes})
            kwargs.setdefault('name', "logit_layer")

            self._encoder.append_layer(
                {"type": "Dense", "kwargs": kwargs})
示例#2
0
    def test_unknown_seq_length(self):
        """Tests use of pooling layer when the seq_length dimension of inputs
        is `None`.
        """
        # Default encoder pools away the unknown time axis.
        default_enc = Conv1DEncoder()
        default_in = tf.placeholder(tf.float32, [64, None, 300])
        self.assertEqual(default_enc(default_in).shape, [64, 128])

        avg_hparams = {
            # Conv layers
            "num_conv_layers": 2,
            "filters": 128,
            "kernel_size": [[3, 4, 5], 4],
            # Pooling layers
            "pooling": "AveragePooling",
            "pool_size": [2, None],
            # Dense layers
            "num_dense_layers": 1,
            "dense_size": 10,
        }
        avg_enc = Conv1DEncoder(avg_hparams)
        # Layers: conv-pool merge + conv + pool + dense + dropout + flatten.
        self.assertEqual(len(avg_enc.layers), 6)
        # A `None` pool_size yields a reduce-pooling layer over time.
        self.assertIsInstance(avg_enc.layer_by_name('pool_2'),
                              tx.core.AverageReducePooling1D)

        avg_in = tf.placeholder(tf.float32, [64, None, 300])
        self.assertEqual(avg_enc(avg_in).shape, [64, 10])

        cf_hparams = {
            # Conv layers
            "num_conv_layers": 1,
            "filters": 128,
            "kernel_size": 4,
            "other_conv_kwargs": {
                'data_format': 'channels_first'
            },
            # Pooling layers
            "pooling": "MaxPooling",
            "other_pool_kwargs": {
                'data_format': 'channels_first'
            },
            # Dense layers
            "num_dense_layers": 1,
            "dense_size": 10,
        }
        # channels_first: the unknown dimension is last.
        cf_enc = Conv1DEncoder(cf_hparams)
        cf_in = tf.placeholder(tf.float32, [64, 300, None])
        self.assertEqual(cf_enc(cf_in).shape, [64, 10])
示例#3
0
    def test_encode(self):
        """Tests encode.
        """
        # Default encoder: 4 layers, starting with a parallel
        # conv-pool merge layer whose branches are sequential.
        default_enc = Conv1DEncoder()
        self.assertEqual(len(default_enc.layers), 4)
        self.assertIsInstance(default_enc.layer_by_name("conv_pool_1"),
                              tx.core.MergeLayer)
        for branch in default_enc.layers[0].layers:
            self.assertIsInstance(branch, tx.core.SequentialLayer)

        default_out = default_enc(tf.ones([64, 16, 300], tf.float32))
        self.assertEqual(default_out.shape, [64, 128])

        custom_hparams = {
            # Conv layers
            "num_conv_layers": 2,
            "filters": 128,
            "kernel_size": [[3, 4, 5], 4],
            "other_conv_kwargs": {
                "padding": "same"
            },
            # Pooling layers
            "pooling": "AveragePooling",
            "pool_size": 2,
            "pool_strides": 1,
            # Dense layers
            "num_dense_layers": 3,
            "dense_size": [128, 128, 10],
            "dense_activation": "relu",
            "other_dense_kwargs": {
                "use_bias": False
            },
            # Dropout
            "dropout_conv": [0, 1, 2],
            "dropout_dense": 2
        }
        custom_enc = Conv1DEncoder(custom_hparams)
        # Layers: conv-pool merge + conv + pool + 3 dense
        #         + 4 dropout + flatten = 11.
        self.assertEqual(len(custom_enc.layers), 11)
        self.assertIsInstance(custom_enc.layer_by_name("conv_pool_1"),
                              tx.core.MergeLayer)
        for branch in custom_enc.layers[1].layers:
            self.assertIsInstance(branch, tx.core.SequentialLayer)

        custom_out = custom_enc(tf.ones([64, 16, 300], tf.float32))
        self.assertEqual(custom_out.shape, [64, 10])
示例#4
0
    def default_hparams():
        """Returns a dictionary of hyperparameters with default values.

        .. code-block:: python

            {
                # (1) Same hyperparameters as in Conv1DEncoder
                ...

                # (2) Additional hyperparameters
                "num_classes": 2,
                "logit_layer_kwargs": {
                    "use_bias": False
                },
                "name": "conv1d_classifier"
            }

        Here:

        1. Same hyperparameters as in :class:`~texar.tf.modules.Conv1DEncoder`.
        See the :meth:`~texar.tf.modules.Conv1DEncoder.default_hparams`.
        An instance of Conv1DEncoder is created for feature extraction.

        2. Additional hyperparameters:

            "num_classes": int
                Number of classes:

                - If **`> 0`**, an additional :tf_main:`Dense <layers/Dense>` \
                layer is appended to the encoder to compute the logits over \
                classes.
                - If **`<= 0`**, no dense layer is appended. The number of \
                classes is assumed to be the final dense layer size of the \
                encoder.

            "logit_layer_kwargs": dict
                Keyword arguments for the logit Dense layer constructor,
                except for argument "units" which is set to "num_classes".
                Ignored if no extra logit layer is appended.

            "name": str
                Name of the classifier.
        """
        # Start from the encoder defaults and overlay the
        # classifier-specific entries.
        hparams = Conv1DEncoder.default_hparams()
        hparams["name"] = "conv1d_classifier"
        # Set "num_classes" to <= 0 to avoid appending the output layer.
        hparams["num_classes"] = 2
        hparams["logit_layer_kwargs"] = {"use_bias": False}
        return hparams