Example 1
    def __init__(self,
                 in_channels: int,
                 in_features: Optional[int] = None,
                 hparams: Optional[Union[HParams, Dict[str, Any]]] = None):
        super().__init__(hparams=hparams)

        # Keep only the hyperparameters that apply to the underlying encoder
        encoder_hparams = utils.dict_fetch(hparams,
                                           Conv1DEncoder.default_hparams())
        self._encoder = Conv1DEncoder(in_channels=in_channels,
                                      in_features=in_features,
                                      hparams=encoder_hparams)

        # Add an additional dense layer if needed
        self._num_classes = self._hparams.num_classes
        if self._num_classes > 0:
            if self._hparams.num_dense_layers <= 0:
                self._encoder.append_layer({"type": "Flatten"})

            logit_kwargs = self._hparams.logit_layer_kwargs
            if logit_kwargs is None:
                logit_kwargs = {}
            elif not isinstance(logit_kwargs, HParams):
                raise ValueError(
                    "hparams['logit_layer_kwargs'] must be a dict.")
            else:
                logit_kwargs = logit_kwargs.todict()
            logit_kwargs.update({"out_features": self._num_classes})

            self._encoder.append_layer({
                "type": "Linear",
                "kwargs": logit_kwargs
            })
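
For reference, with the default hyperparameters the constructor above amounts to a
Conv1DEncoder followed by a single logit Linear layer. Below is a minimal sketch
(not from the source) of the equivalent construction, using only the append_layer
calls shown in this snippet; the sizes 256 and 2 follow the encoder's default
out_features and the default num_classes.

import torch
from texar.torch.modules import Conv1DEncoder

encoder = Conv1DEncoder(in_channels=32, in_features=300)
# The default "num_dense_layers" > 0, so no extra Flatten layer is needed;
# append the logits layer exactly as the constructor does for num_classes = 2.
encoder.append_layer({
    "type": "Linear",
    "kwargs": {"in_features": 256, "out_features": 2, "bias": True},
})
logits = encoder(torch.ones(128, 32, 300))
print(logits.size())  # expected: torch.Size([128, 2])
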
Example 2
    def test_encode(self):
        r"""Tests encode.
        """
        inputs_1 = torch.ones([128, 32, 300])
        encoder_1 = Conv1DEncoder(in_channels=inputs_1.size(1),
                                  in_features=inputs_1.size(2))
        self.assertEqual(len(encoder_1.layers), 4)
        self.assertIsInstance(encoder_1.layer_by_name("MergeLayer"),
                              MergeLayer)
        for layer in encoder_1.layers[0].layers:
            self.assertIsInstance(layer, nn.Sequential)

        outputs_1 = encoder_1(inputs_1)
        self.assertEqual(outputs_1.size(), torch.Size([128, 256]))
        self.assertEqual(outputs_1.size(-1), encoder_1.output_size)

        inputs_2 = torch.ones([128, 64, 300])
        hparams = {
            # Conv layers
            "num_conv_layers": 2,
            "out_channels": 128,
            "kernel_size": [[3, 4, 5], 4],
            "other_conv_kwargs": {
                "padding": 0
            },
            # Pooling layers
            "pooling": "AvgPool1d",
            "pool_size": 2,
            "pool_stride": 1,
            # Dense layers
            "num_dense_layers": 3,
            "out_features": [128, 128, 10],
            "dense_activation": "ReLU",
            "other_dense_kwargs": None,
            # Dropout
            "dropout_conv": [0, 1, 2],
            "dropout_dense": 2
        }
        network_2 = Conv1DEncoder(in_channels=inputs_2.size(1),
                                  in_features=inputs_2.size(2),
                                  hparams=hparams)
        # dropout-merge-dropout-conv-avgpool-dropout-flatten-
        # (Sequential(Linear,ReLU))-(Sequential(Linear,ReLU))-dropout-linear
        self.assertEqual(len(network_2.layers), 1 + 1 + 1 + 3 + 4 + 1)
        self.assertIsInstance(network_2.layer_by_name("MergeLayer"),
                              MergeLayer)
        for layer in network_2.layers[1].layers:
            self.assertIsInstance(layer, nn.Sequential)

        outputs_2 = network_2(inputs_2)
        self.assertEqual(outputs_2.size(), torch.Size([128, 10]))
        self.assertEqual(outputs_2.size(-1), network_2.output_size)
    @staticmethod
    def default_hparams() -> Dict[str, Any]:
        r"""Returns a dictionary of hyperparameters with default values.

        .. code-block:: python

            {
                # (1) Same hyperparameters as in Conv1DEncoder
                ...

                # (2) Additional hyperparameters
                "num_classes": 2,
                "logit_layer_kwargs": {
                    "use_bias": False
                },
                "name": "conv1d_classifier"
            }

        Here:

        1. Same hyperparameters as in
           :class:`~texar.torch.modules.Conv1DEncoder`.
           See :meth:`~texar.torch.modules.Conv1DEncoder.default_hparams` for details.
           An instance of :class:`~texar.torch.modules.Conv1DEncoder` is created
           for feature extraction.

        2. Additional hyperparameters:

           `"num_classes"`: int
               Number of classes:

               - If `> 0`, an additional :torch_nn:`Linear`
                 layer is appended to the encoder to compute the logits over
                 classes.

               - If `<= 0`, no dense layer is appended. The number of
                 classes is assumed to be equal to ``out_features`` of the
                 final dense layer size of the encoder.

           `"logit_layer_kwargs"`: dict
               Keyword arguments for the logit :torch_nn:`Linear` layer
               constructor, except for argument ``out_features`` which is set
               to ``"num_classes"``. Ignored if no extra logit layer is
               appended.

           `"name"`: str
               Name of the classifier.
        """
        hparams = Conv1DEncoder.default_hparams()
        hparams.update({
            "name": "conv1d_classifier",
            "num_classes": 2,  # set to <=0 to avoid appending output layer
            "logit_layer_kwargs": {
                "in_features": hparams["out_features"],
                "bias": True
            }
        })
        return hparams
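
A short usage sketch for the hyperparameters documented above (not part of the
source). The enclosing class is not named in this excerpt; Conv1DClassifier is
assumed here. Only the overridden keys need to appear in the hparams dict;
everything else falls back to default_hparams().

from texar.torch.modules import Conv1DClassifier  # class name assumed

classifier = Conv1DClassifier(
    in_channels=32, in_features=300,
    hparams={
        "num_classes": 10,  # a 10-way logit layer is appended
        "logit_layer_kwargs": {"in_features": 256, "bias": False},
    })
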
    def __init__(self,
                 in_channels: int,
                 in_features: Optional[int] = None,
                 hparams: Optional[Union[HParams, Dict[str, Any]]] = None):
        super().__init__(hparams=hparams)

        # Keep only the hyperparameters that apply to the underlying encoder
        encoder_hparams = utils.dict_fetch(hparams,
                                           Conv1DEncoder.default_hparams())
        self._encoder = Conv1DEncoder(in_channels=in_channels,
                                      in_features=in_features,
                                      hparams=encoder_hparams)

        # Add an additional dense layer if needed
        self._num_classes = self._hparams.num_classes
        if self._num_classes > 0:
            if self._hparams.num_dense_layers <= 0:
                if in_features is None:
                    raise ValueError("'in_features' is required for logits "
                                     "layer when 'num_dense_layers' <= 0")
                self._encoder.append_layer({"type": "Flatten"})
                # Run a dummy batch through the encoder to infer the flattened
                # feature size and use it as the logit layer's in_features.
                ones = torch.ones(1, in_channels, in_features)
                input_size = self._encoder._infer_dense_layer_input_size(ones)  # pylint: disable=protected-access
                self.hparams.logit_layer_kwargs.in_features = input_size[1]

            logit_kwargs = self._hparams.logit_layer_kwargs
            if logit_kwargs is None:
                logit_kwargs = {}
            elif not isinstance(logit_kwargs, HParams):
                raise ValueError(
                    "hparams['logit_layer_kwargs'] must be a dict.")
            else:
                logit_kwargs = logit_kwargs.todict()
            logit_kwargs.update({"out_features": self._num_classes})

            self._encoder.append_layer({
                "type": "Linear",
                "kwargs": logit_kwargs
            })
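
When "num_dense_layers" is 0 or negative, the branch above appends a Flatten layer
and infers the logit layer's in_features from a dummy batch, which is why
in_features must be passed to the constructor in that case. A hedged sketch of that
configuration (again assuming the enclosing class is Conv1DClassifier):

from texar.torch.modules import Conv1DClassifier  # class name assumed

# in_features is mandatory here; omitting it triggers the ValueError above.
classifier = Conv1DClassifier(
    in_channels=32, in_features=300,
    hparams={"num_classes": 2, "num_dense_layers": 0})
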